diff --git a/benches/main.rs b/benches/main.rs index 0198c2943..c558ca3c6 100644 --- a/benches/main.rs +++ b/benches/main.rs @@ -34,7 +34,7 @@ fn ints_json(bench: &mut Bencher) { let validator = build_schema_validator(py, c"{'type': 'int'}"); let result = validator - .validate_json(py, &json(py, "123"), None, None, None, false.into()) + .validate_json(py, &json(py, "123"), None, None, None, false.into(), None, None) .unwrap(); let result_int: i64 = result.extract(py).unwrap(); assert_eq!(result_int, 123); @@ -42,7 +42,7 @@ fn ints_json(bench: &mut Bencher) { bench.iter(|| { black_box( validator - .validate_json(py, &json(py, "123"), None, None, None, false.into()) + .validate_json(py, &json(py, "123"), None, None, None, false.into(), None, None) .unwrap(), ) }) @@ -56,7 +56,7 @@ fn ints_python(bench: &mut Bencher) { let Ok(input) = 123_i64.into_pyobject(py); let result = validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(); let result_int: i64 = result.extract(py).unwrap(); assert_eq!(result_int, 123); @@ -65,7 +65,7 @@ fn ints_python(bench: &mut Bencher) { bench.iter(|| { black_box( validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(), ) }) @@ -84,7 +84,7 @@ fn list_int_json(bench: &mut Bencher) { bench.iter(|| { black_box( validator - .validate_json(py, &json(py, &code), None, None, None, false.into()) + .validate_json(py, &json(py, &code), None, None, None, false.into(), None, None) .unwrap(), ) }) @@ -110,7 +110,7 @@ fn list_int_python(bench: &mut Bencher) { let input = black_box(input.bind(py)); bench.iter(|| { let v = validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(); black_box(v) }) @@ -122,11 +122,15 @@ fn list_int_python_isinstance(bench: &mut Bencher) { Python::with_gil(|py| { let (validator, input) = list_int_input(py); let input = black_box(input.bind(py)); - let v = validator.isinstance_python(py, &input, None, None, None, None).unwrap(); + let v = validator + .isinstance_python(py, &input, None, None, None, None, None, None) + .unwrap(); assert!(v); bench.iter(|| { - let v = validator.isinstance_python(py, &input, None, None, None, None).unwrap(); + let v = validator + .isinstance_python(py, &input, None, None, None, None, None, None) + .unwrap(); black_box(v) }) }) @@ -144,7 +148,7 @@ fn list_error_json(bench: &mut Bencher) { .join(", ") ); - match validator.validate_json(py, &json(py, &code), None, None, None, false.into()) { + match validator.validate_json(py, &json(py, &code), None, None, None, false.into(), None, None) { Ok(_) => panic!("unexpectedly valid"), Err(e) => { let v = e.value(py); @@ -156,7 +160,7 @@ fn list_error_json(bench: &mut Bencher) { }; bench.iter( - || match validator.validate_json(py, &json(py, &code), None, None, None, false.into()) { + || match validator.validate_json(py, &json(py, &code), None, None, None, false.into(), None, None) { Ok(_) => panic!("unexpectedly valid"), Err(e) => black_box(e), }, @@ -177,7 +181,7 @@ fn list_error_python_input(py: Python<'_>) -> (SchemaValidator, PyObject) { let input = py.eval(&code, None, None).unwrap().extract().unwrap(); - match validator.validate_python(py, &input, None, None, None, None, false.into()) { + match validator.validate_python(py, &input, None, None, 
None, None, false.into(), None, None) { Ok(_) => panic!("unexpectedly valid"), Err(e) => { let v = e.value(py); @@ -197,7 +201,7 @@ fn list_error_python(bench: &mut Bencher) { let input = black_box(input.bind(py)); bench.iter(|| { - let result = validator.validate_python(py, &input, None, None, None, None, false.into()); + let result = validator.validate_python(py, &input, None, None, None, None, false.into(), None, None); match result { Ok(_) => panic!("unexpectedly valid"), @@ -212,11 +216,17 @@ fn list_error_python_isinstance(bench: &mut Bencher) { Python::with_gil(|py| { let (validator, input) = list_error_python_input(py); let input = black_box(input.bind(py)); - let r = validator.isinstance_python(py, &input, None, None, None, None).unwrap(); + let r = validator + .isinstance_python(py, &input, None, None, None, None, None, None) + .unwrap(); assert!(!r); bench.iter(|| { - black_box(validator.isinstance_python(py, &input, None, None, None, None).unwrap()); + black_box( + validator + .isinstance_python(py, &input, None, None, None, None, None, None) + .unwrap(), + ); }) }) } @@ -233,7 +243,7 @@ fn list_any_json(bench: &mut Bencher) { bench.iter(|| { black_box( validator - .validate_json(py, &json(py, &code), None, None, None, false.into()) + .validate_json(py, &json(py, &code), None, None, None, false.into(), None, None) .unwrap(), ) }) @@ -253,7 +263,7 @@ fn list_any_python(bench: &mut Bencher) { let input = black_box(input); bench.iter(|| { let v = validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(); black_box(v) }) @@ -287,7 +297,7 @@ fn dict_json(bench: &mut Bencher) { bench.iter(|| { black_box( validator - .validate_json(py, &json(py, &code), None, None, None, false.into()) + .validate_json(py, &json(py, &code), None, None, None, false.into(), None, None) .unwrap(), ) }) @@ -314,7 +324,7 @@ fn dict_python(bench: &mut Bencher) { let input = black_box(input); bench.iter(|| { let v = validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(); black_box(v) }) @@ -344,7 +354,7 @@ fn dict_value_error(bench: &mut Bencher) { let input = py.eval(&code, None, None).unwrap(); - match validator.validate_python(py, &input, None, None, None, None, false.into()) { + match validator.validate_python(py, &input, None, None, None, None, false.into(), None, None) { Ok(_) => panic!("unexpectedly valid"), Err(e) => { let v = e.value(py); @@ -357,7 +367,7 @@ fn dict_value_error(bench: &mut Bencher) { let input = black_box(input); bench.iter(|| { - let result = validator.validate_python(py, &input, None, None, None, None, false.into()); + let result = validator.validate_python(py, &input, None, None, None, None, false.into(), None, None); match result { Ok(_) => panic!("unexpectedly valid"), @@ -395,7 +405,7 @@ fn typed_dict_json(bench: &mut Bencher) { bench.iter(|| { black_box( validator - .validate_json(py, &json(py, code), None, None, None, false.into()) + .validate_json(py, &json(py, code), None, None, None, false.into(), None, None) .unwrap(), ) }) @@ -430,7 +440,7 @@ fn typed_dict_python(bench: &mut Bencher) { let input = black_box(input); bench.iter(|| { let v = validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(); black_box(v) }) @@ -471,7 +481,7 @@ fn 
typed_dict_deep_error(bench: &mut Bencher) { let input = py.eval(code, None, None).unwrap(); let input = black_box(input); - match validator.validate_python(py, &input, None, None, None, None, false.into()) { + match validator.validate_python(py, &input, None, None, None, None, false.into(), None, None) { Ok(_) => panic!("unexpectedly valid"), Err(e) => { let v = e.value(py); @@ -483,7 +493,7 @@ fn typed_dict_deep_error(bench: &mut Bencher) { }; bench.iter(|| { - let result = validator.validate_python(py, &input, None, None, None, None, false.into()); + let result = validator.validate_python(py, &input, None, None, None, None, false.into(), None, None); match result { Ok(_) => panic!("unexpectedly valid"), @@ -510,7 +520,7 @@ fn complete_model(bench: &mut Bencher) { bench.iter(|| { black_box( validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(), ); }) @@ -532,13 +542,13 @@ fn nested_model_using_definitions(bench: &mut Bencher) { let input = black_box(input); validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(); bench.iter(|| { black_box( validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(), ); }) @@ -560,13 +570,13 @@ fn nested_model_inlined(bench: &mut Bencher) { let input = black_box(input); validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(); bench.iter(|| { black_box( validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(), ); }) @@ -580,7 +590,7 @@ fn literal_ints_few_python(bench: &mut Bencher) { let Ok(input) = 4_i64.into_pyobject(py); let result = validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(); let result_int: i64 = result.extract(py).unwrap(); assert_eq!(result_int, 4); @@ -589,7 +599,7 @@ fn literal_ints_few_python(bench: &mut Bencher) { bench.iter(|| { black_box( validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(), ) }) @@ -604,7 +614,7 @@ fn literal_strings_few_small_python(bench: &mut Bencher) { let input = py.eval(c"'4'", None, None).unwrap(); let input_str: String = input.extract().unwrap(); let result = validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(); let result_str: String = result.extract(py).unwrap(); assert_eq!(result_str, input_str); @@ -613,7 +623,7 @@ fn literal_strings_few_small_python(bench: &mut Bencher) { bench.iter(|| { black_box( validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(), ) }) @@ -631,7 +641,7 @@ fn literal_strings_few_large_python(bench: &mut Bencher) { let input = py.eval(c"'a' * 25 + '4'", None, None).unwrap(); let input_str: String = input.extract().unwrap(); let result = validator - .validate_python(py, 
&input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(); let result_str: String = result.extract(py).unwrap(); assert_eq!(result_str, input_str); @@ -640,7 +650,7 @@ fn literal_strings_few_large_python(bench: &mut Bencher) { bench.iter(|| { black_box( validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(), ) }) @@ -674,7 +684,7 @@ class Foo(Enum): let input = py.eval(c"Foo.v4", Some(&globals), None).unwrap(); let result = validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(); assert!(input.eq(result).unwrap()); @@ -682,7 +692,7 @@ class Foo(Enum): bench.iter(|| { black_box( validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(), ) }) @@ -696,7 +706,7 @@ fn literal_ints_many_python(bench: &mut Bencher) { let Ok(input) = 99_i64.into_pyobject(py); let result = validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(); let result_int: i64 = result.extract(py).unwrap(); assert_eq!(result_int, 99); @@ -705,7 +715,7 @@ fn literal_ints_many_python(bench: &mut Bencher) { bench.iter(|| { black_box( validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(), ) }) @@ -721,7 +731,7 @@ fn literal_strings_many_small_python(bench: &mut Bencher) { let input = py.eval(c"'99'", None, None).unwrap(); let input_str: String = input.extract().unwrap(); let result = validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(); let result_str: String = result.extract(py).unwrap(); assert_eq!(result_str, input_str); @@ -730,7 +740,7 @@ fn literal_strings_many_small_python(bench: &mut Bencher) { bench.iter(|| { black_box( validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(), ) }) @@ -748,7 +758,7 @@ fn literal_strings_many_large_python(bench: &mut Bencher) { let input = py.eval(c"'a' * 25 + '99'", None, None).unwrap(); let input_str: String = input.extract().unwrap(); let result = validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(); let result_str: String = result.extract(py).unwrap(); assert_eq!(result_str, input_str); @@ -757,7 +767,7 @@ fn literal_strings_many_large_python(bench: &mut Bencher) { bench.iter(|| { black_box( validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(), ) }) @@ -771,7 +781,7 @@ fn literal_ints_many_json(bench: &mut Bencher) { let input_json = py.eval(c"'99'", None, None).unwrap(); let result = validator - .validate_json(py, &input_json, None, None, None, false.into()) + .validate_json(py, &input_json, None, None, None, false.into(), None, None) .unwrap(); let result_int: i64 = 
result.extract(py).unwrap(); assert_eq!(result_int, 99); @@ -780,7 +790,7 @@ fn literal_ints_many_json(bench: &mut Bencher) { bench.iter(|| { black_box( validator - .validate_json(py, &input_json, None, None, None, false.into()) + .validate_json(py, &input_json, None, None, None, false.into(), None, None) .unwrap(), ) }) @@ -799,7 +809,7 @@ fn literal_strings_many_large_json(bench: &mut Bencher) { let input_json = py.eval(c"'\"' + 'a' * 25 + '99' + '\"'", None, None).unwrap(); let input_str: String = input.extract().unwrap(); let result = validator - .validate_json(py, &input_json, None, None, None, false.into()) + .validate_json(py, &input_json, None, None, None, false.into(), None, None) .unwrap(); let result_str: String = result.extract(py).unwrap(); assert_eq!(result_str, input_str); @@ -808,7 +818,7 @@ fn literal_strings_many_large_json(bench: &mut Bencher) { bench.iter(|| { black_box( validator - .validate_json(py, &input_json, None, None, None, false.into()) + .validate_json(py, &input_json, None, None, None, false.into(), None, None) .unwrap(), ) }) @@ -844,7 +854,7 @@ class Foo(Enum): let input = py.eval(c"'null'", None, None).unwrap(); let input_str: String = input.extract().unwrap(); let result = validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(); let result_str: String = result.extract(py).unwrap(); assert_eq!(result_str, input_str); @@ -853,7 +863,7 @@ class Foo(Enum): bench.iter(|| { black_box( validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(), ) }) @@ -864,7 +874,7 @@ class Foo(Enum): let input = py.eval(c"-1", None, None).unwrap(); let input_int: i64 = input.extract().unwrap(); let result = validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(); let result_int: i64 = result.extract(py).unwrap(); assert_eq!(result_int, input_int); @@ -873,7 +883,7 @@ class Foo(Enum): bench.iter(|| { black_box( validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(), ) }) @@ -883,7 +893,7 @@ class Foo(Enum): { let input = py.eval(c"None", None, None).unwrap(); let result = validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(); assert!(input.eq(result).unwrap()); @@ -891,7 +901,7 @@ class Foo(Enum): bench.iter(|| { black_box( validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(), ) }) @@ -901,7 +911,7 @@ class Foo(Enum): { let input = py.eval(c"Foo.v4", Some(&globals), None).unwrap(); let result = validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(); assert!(input.eq(result).unwrap()); @@ -909,7 +919,7 @@ class Foo(Enum): bench.iter(|| { black_box( validator - .validate_python(py, &input, None, None, None, None, false.into()) + .validate_python(py, &input, None, None, None, None, false.into(), None, None) .unwrap(), ) }) diff --git a/python/pydantic_core/_pydantic_core.pyi 
b/python/pydantic_core/_pydantic_core.pyi index 7245c37f7..3220b3ef8 100644 --- a/python/pydantic_core/_pydantic_core.pyi +++ b/python/pydantic_core/_pydantic_core.pyi @@ -97,6 +97,8 @@ class SchemaValidator: context: Any | None = None, self_instance: Any | None = None, allow_partial: bool | Literal['off', 'on', 'trailing-strings'] = False, + by_alias: bool | None = None, + by_name: bool | None = None, ) -> Any: """ Validate a Python object against the schema and return the validated object. @@ -114,6 +116,8 @@ class SchemaValidator: allow_partial: Whether to allow partial validation; if `True` errors in the last element of sequences and mappings are ignored. `'trailing-strings'` means any final unfinished JSON string is included in the result. + by_alias: Whether to use the field's alias when validating against the provided input data. + by_name: Whether to use the field's name when validating against the provided input data. Raises: ValidationError: If validation fails. @@ -130,6 +134,8 @@ class SchemaValidator: from_attributes: bool | None = None, context: Any | None = None, self_instance: Any | None = None, + by_alias: bool | None = None, + by_name: bool | None = None, ) -> bool: """ Similar to [`validate_python()`][pydantic_core.SchemaValidator.validate_python] but returns a boolean. @@ -148,6 +154,8 @@ class SchemaValidator: context: Any | None = None, self_instance: Any | None = None, allow_partial: bool | Literal['off', 'on', 'trailing-strings'] = False, + by_alias: bool | None = None, + by_name: bool | None = None, ) -> Any: """ Validate JSON data directly against the schema and return the validated Python object. @@ -168,6 +176,8 @@ class SchemaValidator: allow_partial: Whether to allow partial validation; if `True` incomplete JSON will be parsed successfully and errors in the last element of sequences and mappings are ignored. `'trailing-strings'` means any final unfinished JSON string is included in the result. + by_alias: Whether to use the field's alias when validating against the provided input data. + by_name: Whether to use the field's name when validating against the provided input data. Raises: ValidationError: If validation fails or if the JSON data is invalid. @@ -183,6 +193,8 @@ class SchemaValidator: strict: bool | None = None, context: Any | None = None, allow_partial: bool | Literal['off', 'on', 'trailing-strings'] = False, + by_alias: bool | None = None, + by_name: bool | None = None, ) -> Any: """ Validate a string against the schema and return the validated Python object. @@ -199,6 +211,8 @@ class SchemaValidator: allow_partial: Whether to allow partial validation; if `True` errors in the last element of sequences and mappings are ignored. `'trailing-strings'` means any final unfinished JSON string is included in the result. + by_alias: Whether to use the field's alias when validating against the provided input data. + by_name: Whether to use the field's name when validating against the provided input data. Raises: ValidationError: If validation fails or if the JSON data is invalid. @@ -216,6 +230,8 @@ class SchemaValidator: strict: bool | None = None, from_attributes: bool | None = None, context: Any | None = None, + by_alias: bool | None = None, + by_name: bool | None = None, ) -> dict[str, Any] | tuple[dict[str, Any], dict[str, Any] | None, set[str]]: """ Validate an assignment to a field on a model. @@ -230,6 +246,8 @@ class SchemaValidator: If `None`, the value of [`CoreConfig.from_attributes`][pydantic_core.core_schema.CoreConfig] is used. 
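A minimal usage sketch of the per-call `by_alias` / `by_name` flags documented above (illustrative only; assumes a pydantic-core build that includes this change):

```python
# Illustrative sketch -- assumes the new `by_alias` / `by_name` keyword arguments exist.
from pydantic_core import SchemaValidator, core_schema

schema = core_schema.typed_dict_schema(
    {'my_field': core_schema.typed_dict_field(core_schema.int_schema(), validation_alias='myAlias')}
)
v = SchemaValidator(schema)

v.validate_python({'myAlias': 1})                                 # aliases accepted by default
v.validate_python({'my_field': 1}, by_name=True)                  # additionally accept the field name
v.validate_json('{"my_field": 1}', by_alias=False, by_name=True)  # accept the name only
```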
context: The context to use for validation, this is passed to functional validators as [`info.context`][pydantic_core.core_schema.ValidationInfo.context]. + by_alias: Whether to use the field's alias when validating against the provided input data. + by_name: Whether to use the field's name when validating against the provided input data. Raises: ValidationError: If validation fails. @@ -283,7 +301,7 @@ class SchemaSerializer: mode: str | None = None, include: _IncEx | None = None, exclude: _IncEx | None = None, - by_alias: bool = True, + by_alias: bool | None = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, @@ -329,7 +347,7 @@ class SchemaSerializer: indent: int | None = None, include: _IncEx | None = None, exclude: _IncEx | None = None, - by_alias: bool = True, + by_alias: bool | None = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, @@ -374,7 +392,7 @@ def to_json( indent: int | None = None, include: _IncEx | None = None, exclude: _IncEx | None = None, - by_alias: bool = True, + by_alias: bool | None = None, exclude_none: bool = False, round_trip: bool = False, timedelta_mode: Literal['iso8601', 'float'] = 'iso8601', @@ -450,7 +468,7 @@ def to_jsonable_python( *, include: _IncEx | None = None, exclude: _IncEx | None = None, - by_alias: bool = True, + by_alias: bool | None = None, exclude_none: bool = False, round_trip: bool = False, timedelta_mode: Literal['iso8601', 'float'] = 'iso8601', diff --git a/python/pydantic_core/core_schema.py b/python/pydantic_core/core_schema.py index e999bdcfc..d4bcdb683 100644 --- a/python/pydantic_core/core_schema.py +++ b/python/pydantic_core/core_schema.py @@ -54,8 +54,6 @@ class CoreConfig(TypedDict, total=False): `field_names` to construct error `loc`s. Default is `True`. revalidate_instances: Whether instances of models and dataclasses should re-validate. Default is 'never'. validate_default: Whether to validate default values during validation. Default is `False`. - populate_by_name: Whether an aliased field may be populated by its name as given by the model attribute, - as well as the alias. (Replaces 'allow_population_by_field_name' in Pydantic v1.) Default is `False`. str_max_length: The maximum length for string fields. str_min_length: The minimum length for string fields. str_strip_whitespace: Whether to strip whitespace from string fields. @@ -74,6 +72,9 @@ class CoreConfig(TypedDict, total=False): regex_engine: The regex engine to use for regex pattern validation. Default is 'rust-regex'. See `StringSchema`. cache_strings: Whether to cache strings. Default is `True`, `True` or `'all'` is required to cache strings during general validation since validators don't know if they're in a key or a value. + validate_by_alias: Whether to use the field's alias when validating against the provided input data. Default is `True`. + validate_by_name: Whether to use the field's name when validating against the provided input data. Default is `False`. Replacement for `populate_by_name`. + serialize_by_alias: Whether to serialize by alias. Default is `False`, expected to change to `True` in V3. 
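A short sketch of how the new config keys interact with the serializer's tri-state `by_alias` (illustrative only; assumes this branch):

```python
# Illustrative sketch -- `validate_by_alias` / `validate_by_name` replace `populate_by_name`,
# and `serialize_by_alias` is the config-level default that a `by_alias=None` call falls back to.
from pydantic_core import SchemaSerializer, core_schema

config = core_schema.CoreConfig(validate_by_alias=True, validate_by_name=True, serialize_by_alias=True)
schema = core_schema.typed_dict_schema(
    {'my_field': core_schema.typed_dict_field(core_schema.int_schema(), serialization_alias='myAlias')}
)
s = SchemaSerializer(schema, config)

s.to_python({'my_field': 1})                  # {'myAlias': 1} -- config serialize_by_alias applies
s.to_python({'my_field': 1}, by_alias=False)  # {'my_field': 1} -- call-level override wins
```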
""" title: str @@ -91,7 +92,6 @@ class CoreConfig(TypedDict, total=False): # whether to validate default values during validation, default False validate_default: bool # used on typed-dicts and arguments - populate_by_name: bool # replaces `allow_population_by_field_name` in pydantic v1 # fields related to string fields only str_max_length: int str_min_length: int @@ -111,6 +111,9 @@ class CoreConfig(TypedDict, total=False): coerce_numbers_to_str: bool # default: False regex_engine: Literal['rust-regex', 'python-re'] # default: 'rust-regex' cache_strings: Union[bool, Literal['all', 'keys', 'none']] # default: 'True' + validate_by_alias: bool # default: True + validate_by_name: bool # default: False + serialize_by_alias: bool # default: False IncExCall: TypeAlias = 'set[int | str] | dict[int | str, IncExCall] | None' @@ -2888,7 +2891,6 @@ class TypedDictSchema(TypedDict, total=False): # all these values can be set via config, equivalent fields have `typed_dict_` prefix extra_behavior: ExtraBehavior total: bool # default: True - populate_by_name: bool # replaces `allow_population_by_field_name` in pydantic v1 ref: str metadata: dict[str, Any] serialization: SerSchema @@ -2904,7 +2906,6 @@ def typed_dict_schema( extras_schema: CoreSchema | None = None, extra_behavior: ExtraBehavior | None = None, total: bool | None = None, - populate_by_name: bool | None = None, ref: str | None = None, metadata: dict[str, Any] | None = None, serialization: SerSchema | None = None, @@ -2938,7 +2939,6 @@ class MyTypedDict(TypedDict): metadata: Any other information you want to include with the schema, not used by pydantic-core extra_behavior: The extra behavior to use for the typed dict total: Whether the typed dict is total, otherwise uses `typed_dict_total` from config - populate_by_name: Whether the typed dict should populate by name serialization: Custom serialization schema """ return _dict_not_none( @@ -2950,7 +2950,6 @@ class MyTypedDict(TypedDict): extras_schema=extras_schema, extra_behavior=extra_behavior, total=total, - populate_by_name=populate_by_name, ref=ref, metadata=metadata, serialization=serialization, @@ -3012,9 +3011,7 @@ class ModelFieldsSchema(TypedDict, total=False): computed_fields: list[ComputedField] strict: bool extras_schema: CoreSchema - # all these values can be set via config, equivalent fields have `typed_dict_` prefix extra_behavior: ExtraBehavior - populate_by_name: bool # replaces `allow_population_by_field_name` in pydantic v1 from_attributes: bool ref: str metadata: dict[str, Any] @@ -3029,7 +3026,6 @@ def model_fields_schema( strict: bool | None = None, extras_schema: CoreSchema | None = None, extra_behavior: ExtraBehavior | None = None, - populate_by_name: bool | None = None, from_attributes: bool | None = None, ref: str | None = None, metadata: dict[str, Any] | None = None, @@ -3058,7 +3054,6 @@ def model_fields_schema( ref: optional unique identifier of the schema, used to reference the schema in other places metadata: Any other information you want to include with the schema, not used by pydantic-core extra_behavior: The extra behavior to use for the typed dict - populate_by_name: Whether the typed dict should populate by name from_attributes: Whether the typed dict should be populated from attributes serialization: Custom serialization schema """ @@ -3070,7 +3065,6 @@ def model_fields_schema( strict=strict, extras_schema=extras_schema, extra_behavior=extra_behavior, - populate_by_name=populate_by_name, from_attributes=from_attributes, ref=ref, metadata=metadata, @@ 
-3254,7 +3248,6 @@ class DataclassArgsSchema(TypedDict, total=False): dataclass_name: Required[str] fields: Required[list[DataclassField]] computed_fields: list[ComputedField] - populate_by_name: bool # default: False collect_init_only: bool # default: False ref: str metadata: dict[str, Any] @@ -3267,7 +3260,6 @@ def dataclass_args_schema( fields: list[DataclassField], *, computed_fields: list[ComputedField] | None = None, - populate_by_name: bool | None = None, collect_init_only: bool | None = None, ref: str | None = None, metadata: dict[str, Any] | None = None, @@ -3295,7 +3287,6 @@ def dataclass_args_schema( dataclass_name: The name of the dataclass being validated fields: The fields to use for the dataclass computed_fields: Computed fields to use when serializing the dataclass - populate_by_name: Whether to populate by name collect_init_only: Whether to collect init only fields into a dict to pass to `__post_init__` ref: optional unique identifier of the schema, used to reference the schema in other places metadata: Any other information you want to include with the schema, not used by pydantic-core @@ -3307,7 +3298,6 @@ def dataclass_args_schema( dataclass_name=dataclass_name, fields=fields, computed_fields=computed_fields, - populate_by_name=populate_by_name, collect_init_only=collect_init_only, ref=ref, metadata=metadata, @@ -3436,7 +3426,8 @@ def arguments_parameter( class ArgumentsSchema(TypedDict, total=False): type: Required[Literal['arguments']] arguments_schema: Required[list[ArgumentsParameter]] - populate_by_name: bool + validate_by_name: bool + validate_by_alias: bool var_args_schema: CoreSchema var_kwargs_mode: VarKwargsMode var_kwargs_schema: CoreSchema @@ -3448,7 +3439,8 @@ class ArgumentsSchema(TypedDict, total=False): def arguments_schema( arguments: list[ArgumentsParameter], *, - populate_by_name: bool | None = None, + validate_by_name: bool | None = None, + validate_by_alias: bool | None = None, var_args_schema: CoreSchema | None = None, var_kwargs_mode: VarKwargsMode | None = None, var_kwargs_schema: CoreSchema | None = None, @@ -3475,7 +3467,8 @@ def arguments_schema( Args: arguments: The arguments to use for the arguments schema - populate_by_name: Whether to populate by name + validate_by_name: Whether to populate by the parameter names, defaults to `False`. + validate_by_alias: Whether to populate by the parameter aliases, defaults to `True`. var_args_schema: The variable args schema to use for the arguments schema var_kwargs_mode: The validation mode to use for variadic keyword arguments. If `'uniform'`, every value of the keyword arguments will be validated against the `var_kwargs_schema` schema. 
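A hedged sketch of the renamed flags on `arguments_schema` (formerly `populate_by_name`); the parameter and alias names below are illustrative:

```python
# Illustrative sketch -- `validate_by_name` / `validate_by_alias` replace the old
# `populate_by_name` flag on arguments schemas.
from pydantic_core import ArgsKwargs, SchemaValidator, core_schema

schema = core_schema.arguments_schema(
    [core_schema.arguments_parameter('a', core_schema.int_schema(), alias='A')],
    validate_by_name=True,   # accept the parameter name 'a'
    validate_by_alias=True,  # and the alias 'A' (the default)
)
v = SchemaValidator(schema)

v.validate_python(ArgsKwargs((), {'A': 1}))  # -> ((), {'a': 1})
v.validate_python(ArgsKwargs((), {'a': 1}))  # -> ((), {'a': 1})
```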
 If `'unpacked-typed-dict'`,
@@ -3488,7 +3481,8 @@
     return _dict_not_none(
         type='arguments',
         arguments_schema=arguments,
-        populate_by_name=populate_by_name,
+        validate_by_name=validate_by_name,
+        validate_by_alias=validate_by_alias,
         var_args_schema=var_args_schema,
         var_kwargs_mode=var_kwargs_mode,
         var_kwargs_schema=var_kwargs_schema,
diff --git a/src/errors/validation_exception.rs b/src/errors/validation_exception.rs
index 2ce752b42..16e89eb68 100644
--- a/src/errors/validation_exception.rs
+++ b/src/errors/validation_exception.rs
@@ -344,7 +344,7 @@ impl ValidationError {
         let extra = state.extra(
             py,
             &SerMode::Json,
-            true,
+            None,
             false,
             false,
             true,
diff --git a/src/lookup_key.rs b/src/lookup_key.rs
index c83a00583..9b06db8fa 100644
--- a/src/lookup_key.rs
+++ b/src/lookup_key.rs
@@ -577,3 +577,47 @@ fn py_get_attrs<'py>(obj: &Bound<'py, PyAny>, attr_name: &Py<PyString>) -> PyRes
         }
     }
 }
+
+#[derive(Debug)]
+#[allow(clippy::struct_field_names)]
+pub struct LookupKeyCollection {
+    by_name: LookupKey,
+    by_alias: Option<LookupKey>,
+    by_alias_then_name: Option<LookupKey>,
+}
+
+impl LookupKeyCollection {
+    pub fn new(py: Python, validation_alias: Option<Bound<'_, PyAny>>, field_name: &str) -> PyResult<Self> {
+        let by_name = LookupKey::from_string(py, field_name);
+
+        if let Some(va) = validation_alias {
+            let by_alias = Some(LookupKey::from_py(py, &va, None)?);
+            let by_alias_then_name = Some(LookupKey::from_py(py, &va, Some(field_name))?);
+            Ok(Self {
+                by_name,
+                by_alias,
+                by_alias_then_name,
+            })
+        } else {
+            Ok(Self {
+                by_name,
+                by_alias: None,
+                by_alias_then_name: None,
+            })
+        }
+    }
+
+    pub fn select(&self, validate_by_alias: bool, validate_by_name: bool) -> PyResult<&LookupKey> {
+        let lookup_key_selection = match (validate_by_alias, validate_by_name) {
+            (true, true) => self.by_alias_then_name.as_ref().unwrap_or(&self.by_name),
+            (true, false) => self.by_alias.as_ref().unwrap_or(&self.by_name),
+            (false, true) => &self.by_name,
+            (false, false) => {
+                // Note: we shouldn't hit this branch much, as this is enforced in `pydantic` with a `PydanticUserError`
+                // at config creation time / validation function call time.
+ return py_schema_err!("`validate_by_name` and `validate_by_alias` cannot both be set to `False`."); + } + }; + Ok(lookup_key_selection) + } +} diff --git a/src/serializers/computed_fields.rs b/src/serializers/computed_fields.rs index 3f81a825c..6e6786d73 100644 --- a/src/serializers/computed_fields.rs +++ b/src/serializers/computed_fields.rs @@ -98,7 +98,7 @@ impl ComputedFields { exclude: next_exclude.as_ref(), extra: &field_extra, }; - let key = match extra.by_alias { + let key = match extra.serialize_by_alias_or(computed_field.serialize_by_alias) { true => computed_field.alias.as_str(), false => computed_field.property_name.as_str(), }; @@ -116,6 +116,7 @@ struct ComputedField { serializer: CombinedSerializer, alias: String, alias_py: Py, + serialize_by_alias: Option, } impl ComputedField { @@ -139,6 +140,7 @@ impl ComputedField { serializer, alias: alias_py.extract()?, alias_py: alias_py.into(), + serialize_by_alias: config.get_as(intern!(py, "serialize_by_alias"))?, }) } @@ -163,7 +165,7 @@ impl ComputedField { if extra.exclude_none && value.is_none(py) { return Ok(()); } - let key = match extra.by_alias { + let key = match extra.serialize_by_alias_or(self.serialize_by_alias) { true => self.alias_py.bind(py), false => property_name_py, }; diff --git a/src/serializers/extra.rs b/src/serializers/extra.rs index 295a2de18..a369859fb 100644 --- a/src/serializers/extra.rs +++ b/src/serializers/extra.rs @@ -84,7 +84,7 @@ impl SerializationState { &'py self, py: Python<'py>, mode: &'py SerMode, - by_alias: bool, + by_alias: Option, exclude_none: bool, round_trip: bool, serialize_unknown: bool, @@ -122,7 +122,7 @@ pub(crate) struct Extra<'a> { pub mode: &'a SerMode, pub ob_type_lookup: &'a ObTypeLookup, pub warnings: &'a CollectWarnings, - pub by_alias: bool, + pub by_alias: Option, pub exclude_unset: bool, pub exclude_defaults: bool, pub exclude_none: bool, @@ -147,7 +147,7 @@ impl<'a> Extra<'a> { pub fn new( py: Python<'a>, mode: &'a SerMode, - by_alias: bool, + by_alias: Option, warnings: &'a CollectWarnings, exclude_unset: bool, exclude_defaults: bool, @@ -204,6 +204,10 @@ impl<'a> Extra<'a> { pub(crate) fn model_type_name(&self) -> Option> { self.model.and_then(|model| model.get_type().name().ok()) } + + pub fn serialize_by_alias_or(&self, serialize_by_alias: Option) -> bool { + self.by_alias.or(serialize_by_alias).unwrap_or(false) + } } #[derive(Clone, Copy, PartialEq, Eq)] @@ -228,7 +232,7 @@ impl SerCheck { pub(crate) struct ExtraOwned { mode: SerMode, warnings: CollectWarnings, - by_alias: bool, + by_alias: Option, exclude_unset: bool, exclude_defaults: bool, exclude_none: bool, diff --git a/src/serializers/fields.rs b/src/serializers/fields.rs index 662ec936f..b0da9ba56 100644 --- a/src/serializers/fields.rs +++ b/src/serializers/fields.rs @@ -29,6 +29,7 @@ pub(super) struct SerField { // None serializer means exclude pub serializer: Option, pub required: bool, + pub serialize_by_alias: Option, } impl_py_gc_traverse!(SerField { serializer }); @@ -40,6 +41,7 @@ impl SerField { alias: Option, serializer: Option, required: bool, + serialize_by_alias: Option, ) -> Self { let alias_py = alias.as_ref().map(|alias| PyString::new(py, alias.as_str()).into()); Self { @@ -48,11 +50,12 @@ impl SerField { alias_py, serializer, required, + serialize_by_alias, } } pub fn get_key_py<'py>(&self, py: Python<'py>, extra: &Extra) -> &Bound<'py, PyAny> { - if extra.by_alias { + if extra.serialize_by_alias_or(self.serialize_by_alias) { if let Some(ref alias_py) = self.alias_py { return 
alias_py.bind(py); } @@ -61,7 +64,7 @@ impl SerField { } pub fn get_key_json<'a>(&'a self, key_str: &'a str, extra: &Extra) -> Cow<'a, str> { - if extra.by_alias { + if extra.serialize_by_alias_or(self.serialize_by_alias) { if let Some(ref alias) = self.alias { return Cow::Borrowed(alias.as_str()); } diff --git a/src/serializers/mod.rs b/src/serializers/mod.rs index e652a75de..6ed496aab 100644 --- a/src/serializers/mod.rs +++ b/src/serializers/mod.rs @@ -54,7 +54,7 @@ impl SchemaSerializer { &'b self, py: Python<'a>, mode: &'a SerMode, - by_alias: bool, + by_alias: Option, warnings: &'a CollectWarnings, exclude_unset: bool, exclude_defaults: bool, @@ -106,7 +106,7 @@ impl SchemaSerializer { } #[allow(clippy::too_many_arguments)] - #[pyo3(signature = (value, *, mode = None, include = None, exclude = None, by_alias = true, + #[pyo3(signature = (value, *, mode = None, include = None, exclude = None, by_alias = None, exclude_unset = false, exclude_defaults = false, exclude_none = false, round_trip = false, warnings = WarningsArg::Bool(true), fallback = None, serialize_as_any = false, context = None))] pub fn to_python( @@ -116,7 +116,7 @@ impl SchemaSerializer { mode: Option<&str>, include: Option<&Bound<'_, PyAny>>, exclude: Option<&Bound<'_, PyAny>>, - by_alias: bool, + by_alias: Option, exclude_unset: bool, exclude_defaults: bool, exclude_none: bool, @@ -155,7 +155,7 @@ impl SchemaSerializer { } #[allow(clippy::too_many_arguments)] - #[pyo3(signature = (value, *, indent = None, include = None, exclude = None, by_alias = true, + #[pyo3(signature = (value, *, indent = None, include = None, exclude = None, by_alias = None, exclude_unset = false, exclude_defaults = false, exclude_none = false, round_trip = false, warnings = WarningsArg::Bool(true), fallback = None, serialize_as_any = false, context = None))] pub fn to_json( @@ -165,7 +165,7 @@ impl SchemaSerializer { indent: Option, include: Option<&Bound<'_, PyAny>>, exclude: Option<&Bound<'_, PyAny>>, - by_alias: bool, + by_alias: Option, exclude_unset: bool, exclude_defaults: bool, exclude_none: bool, @@ -239,7 +239,7 @@ impl SchemaSerializer { #[allow(clippy::too_many_arguments)] #[pyfunction] -#[pyo3(signature = (value, *, indent = None, include = None, exclude = None, by_alias = true, +#[pyo3(signature = (value, *, indent = None, include = None, exclude = None, by_alias = None, exclude_none = false, round_trip = false, timedelta_mode = "iso8601", bytes_mode = "utf8", inf_nan_mode = "constants", serialize_unknown = false, fallback = None, serialize_as_any = false, context = None))] @@ -249,7 +249,7 @@ pub fn to_json( indent: Option, include: Option<&Bound<'_, PyAny>>, exclude: Option<&Bound<'_, PyAny>>, - by_alias: bool, + by_alias: Option, exclude_none: bool, round_trip: bool, timedelta_mode: &str, @@ -282,7 +282,7 @@ pub fn to_json( #[allow(clippy::too_many_arguments)] #[pyfunction] -#[pyo3(signature = (value, *, include = None, exclude = None, by_alias = true, exclude_none = false, round_trip = false, +#[pyo3(signature = (value, *, include = None, exclude = None, by_alias = None, exclude_none = false, round_trip = false, timedelta_mode = "iso8601", bytes_mode = "utf8", inf_nan_mode = "constants", serialize_unknown = false, fallback = None, serialize_as_any = false, context = None))] pub fn to_jsonable_python( @@ -290,7 +290,7 @@ pub fn to_jsonable_python( value: &Bound<'_, PyAny>, include: Option<&Bound<'_, PyAny>>, exclude: Option<&Bound<'_, PyAny>>, - by_alias: bool, + by_alias: Option, exclude_none: bool, round_trip: bool, 
timedelta_mode: &str, diff --git a/src/serializers/type_serializers/dataclass.rs b/src/serializers/type_serializers/dataclass.rs index 5b3c425df..a080a9b4f 100644 --- a/src/serializers/type_serializers/dataclass.rs +++ b/src/serializers/type_serializers/dataclass.rs @@ -37,6 +37,8 @@ impl BuildSerializer for DataclassArgsBuilder { _ => FieldsMode::SimpleDict, }; + let serialize_by_alias = config.get_as(intern!(py, "serialize_by_alias"))?; + for (index, item) in fields_list.iter().enumerate() { let field_info = item.downcast::()?; let name: String = field_info.get_as_req(intern!(py, "name"))?; @@ -45,14 +47,17 @@ impl BuildSerializer for DataclassArgsBuilder { if !field_info.get_as(intern!(py, "init_only"))?.unwrap_or(false) { if field_info.get_as(intern!(py, "serialization_exclude"))? == Some(true) { - fields.insert(name, SerField::new(py, key_py, None, None, true)); + fields.insert(name, SerField::new(py, key_py, None, None, true, serialize_by_alias)); } else { let schema = field_info.get_as_req(intern!(py, "schema"))?; let serializer = CombinedSerializer::build(&schema, config, definitions) .map_err(|e| py_schema_error_type!("Field `{}`:\n {}", index, e))?; let alias = field_info.get_as(intern!(py, "serialization_alias"))?; - fields.insert(name, SerField::new(py, key_py, alias, Some(serializer), true)); + fields.insert( + name, + SerField::new(py, key_py, alias, Some(serializer), true, serialize_by_alias), + ); } } } diff --git a/src/serializers/type_serializers/function.rs b/src/serializers/type_serializers/function.rs index 3aac20d7c..79317035d 100644 --- a/src/serializers/type_serializers/function.rs +++ b/src/serializers/type_serializers/function.rs @@ -547,7 +547,7 @@ struct SerializationInfo { #[pyo3(get, name = "mode")] _mode: SerMode, #[pyo3(get)] - by_alias: bool, + by_alias: Option, #[pyo3(get)] exclude_unset: bool, #[pyo3(get)] @@ -668,7 +668,7 @@ impl SerializationInfo { None => "None".to_owned(), }, self._mode, - py_bool(self.by_alias), + py_bool(self.by_alias.unwrap_or(false)), py_bool(self.exclude_unset), py_bool(self.exclude_defaults), py_bool(self.exclude_none), diff --git a/src/serializers/type_serializers/model.rs b/src/serializers/type_serializers/model.rs index 198032ed4..274fb48a0 100644 --- a/src/serializers/type_serializers/model.rs +++ b/src/serializers/type_serializers/model.rs @@ -47,6 +47,8 @@ impl BuildSerializer for ModelFieldsBuilder { (_, _) => None, }; + let serialize_by_alias = config.get_as(intern!(py, "serialize_by_alias"))?; + for (key, value) in fields_dict { let key_py = key.downcast_into::()?; let key: String = key_py.extract()?; @@ -55,7 +57,7 @@ impl BuildSerializer for ModelFieldsBuilder { let key_py: Py = key_py.into(); if field_info.get_as(intern!(py, "serialization_exclude"))? 
== Some(true) { - fields.insert(key, SerField::new(py, key_py, None, None, true)); + fields.insert(key, SerField::new(py, key_py, None, None, true, serialize_by_alias)); } else { let alias: Option = field_info.get_as(intern!(py, "serialization_alias"))?; @@ -63,7 +65,10 @@ impl BuildSerializer for ModelFieldsBuilder { let serializer = CombinedSerializer::build(&schema, config, definitions) .map_err(|e| py_schema_error_type!("Field `{}`:\n {}", key, e))?; - fields.insert(key, SerField::new(py, key_py, alias, Some(serializer), true)); + fields.insert( + key, + SerField::new(py, key_py, alias, Some(serializer), true, serialize_by_alias), + ); } } diff --git a/src/serializers/type_serializers/typed_dict.rs b/src/serializers/type_serializers/typed_dict.rs index e80a9e9b3..d93a88550 100644 --- a/src/serializers/type_serializers/typed_dict.rs +++ b/src/serializers/type_serializers/typed_dict.rs @@ -32,6 +32,8 @@ impl BuildSerializer for TypedDictBuilder { _ => FieldsMode::SimpleDict, }; + let serialize_by_alias = config.get_as(intern!(py, "serialize_by_alias"))?; + let fields_dict: Bound<'_, PyDict> = schema.get_as_req(intern!(py, "fields"))?; let mut fields: AHashMap = AHashMap::with_capacity(fields_dict.len()); @@ -52,14 +54,17 @@ impl BuildSerializer for TypedDictBuilder { let required = field_info.get_as(intern!(py, "required"))?.unwrap_or(total); if field_info.get_as(intern!(py, "serialization_exclude"))? == Some(true) { - fields.insert(key, SerField::new(py, key_py, None, None, required)); + fields.insert(key, SerField::new(py, key_py, None, None, required, serialize_by_alias)); } else { let alias: Option = field_info.get_as(intern!(py, "serialization_alias"))?; let schema = field_info.get_as_req(intern!(py, "schema"))?; let serializer = CombinedSerializer::build(&schema, config, definitions) .map_err(|e| py_schema_error_type!("Field `{}`:\n {}", key, e))?; - fields.insert(key, SerField::new(py, key_py, alias, Some(serializer), required)); + fields.insert( + key, + SerField::new(py, key_py, alias, Some(serializer), required, serialize_by_alias), + ); } } diff --git a/src/url.rs b/src/url.rs index 692ac80c2..8d31fb27e 100644 --- a/src/url.rs +++ b/src/url.rs @@ -45,7 +45,7 @@ impl PyUrl { pub fn py_new(py: Python, url: &Bound<'_, PyAny>) -> PyResult { let schema_obj = SCHEMA_DEFINITION_URL .get_or_init(py, || build_schema_validator(py, "url")) - .validate_python(py, url, None, None, None, None, false.into())?; + .validate_python(py, url, None, None, None, None, false.into(), None, None)?; schema_obj.extract(py) } @@ -225,7 +225,7 @@ impl PyMultiHostUrl { pub fn py_new(py: Python, url: &Bound<'_, PyAny>) -> PyResult { let schema_obj = SCHEMA_DEFINITION_MULTI_HOST_URL .get_or_init(py, || build_schema_validator(py, "multi-host-url")) - .validate_python(py, url, None, None, None, None, false.into())?; + .validate_python(py, url, None, None, None, None, false.into(), None, None)?; schema_obj.extract(py) } diff --git a/src/validators/arguments.rs b/src/validators/arguments.rs index ef129610a..7c397d69f 100644 --- a/src/validators/arguments.rs +++ b/src/validators/arguments.rs @@ -11,7 +11,7 @@ use crate::build_tools::py_schema_err; use crate::build_tools::{schema_or_config_same, ExtraBehavior}; use crate::errors::{ErrorTypeDefaults, ValError, ValLineError, ValResult}; use crate::input::{Arguments, BorrowInput, Input, KeywordArgs, PositionalArgs, ValidationMatch}; -use crate::lookup_key::LookupKey; +use crate::lookup_key::LookupKeyCollection; use crate::tools::SchemaDict; use 
super::validation_state::ValidationState; @@ -42,9 +42,10 @@ impl FromStr for VarKwargsMode { struct Parameter { positional: bool, name: String, - kw_lookup_key: Option, kwarg_key: Option>, validator: CombinedValidator, + lookup_key_collection: LookupKeyCollection, + mode: String, } #[derive(Debug)] @@ -56,6 +57,8 @@ pub struct ArgumentsValidator { var_kwargs_validator: Option>, loc_by_alias: bool, extra: ExtraBehavior, + validate_by_alias: Option, + validate_by_name: Option, } impl BuildValidator for ArgumentsValidator { @@ -68,8 +71,6 @@ impl BuildValidator for ArgumentsValidator { ) -> PyResult { let py = schema.py(); - let populate_by_name = schema_or_config_same(schema, config, intern!(py, "populate_by_name"))?.unwrap_or(false); - let arguments_schema: Bound<'_, PyList> = schema.get_as_req(intern!(py, "arguments_schema"))?; let mut parameters: Vec = Vec::with_capacity(arguments_schema.len()); @@ -97,18 +98,11 @@ impl BuildValidator for ArgumentsValidator { had_keyword_only = true; } - let mut kw_lookup_key = None; - let mut kwarg_key = None; - if mode == "keyword_only" || mode == "positional_or_keyword" { - kw_lookup_key = match arg.get_item(intern!(py, "alias"))? { - Some(alias) => { - let alt_alias = if populate_by_name { Some(name.as_str()) } else { None }; - Some(LookupKey::from_py(py, &alias, alt_alias)?) - } - None => Some(LookupKey::from_string(py, &name)), - }; - kwarg_key = Some(py_name.unbind()); - } + let kwarg_key = if matches!(mode, "keyword_only" | "positional_or_keyword") { + Some(py_name.unbind()) + } else { + None + }; let schema = arg.get_as_req(intern!(py, "schema"))?; @@ -132,12 +126,17 @@ impl BuildValidator for ArgumentsValidator { } else if has_default { had_default_arg = true; } + + let validation_alias = arg.get_item(intern!(py, "alias"))?; + let lookup_key_collection = LookupKeyCollection::new(py, validation_alias, name.as_str())?; + parameters.push(Parameter { positional, name, - kw_lookup_key, kwarg_key, validator, + lookup_key_collection, + mode: mode.to_string(), }); } @@ -168,6 +167,8 @@ impl BuildValidator for ArgumentsValidator { var_kwargs_validator, loc_by_alias: config.get_as(intern!(py, "loc_by_alias"))?.unwrap_or(true), extra: ExtraBehavior::from_schema_or_config(py, schema, config, ExtraBehavior::Forbid)?, + validate_by_alias: schema_or_config_same(schema, config, intern!(py, "validate_by_alias"))?, + validate_by_name: schema_or_config_same(schema, config, intern!(py, "validate_by_name"))?, } .into()) } @@ -198,6 +199,9 @@ impl Validator for ArgumentsValidator { let mut errors: Vec = Vec::new(); let mut used_kwargs: AHashSet<&str> = AHashSet::with_capacity(self.parameters.len()); + let validate_by_alias = state.validate_by_alias_or(self.validate_by_alias); + let validate_by_name = state.validate_by_name_or(self.validate_by_name); + // go through arguments getting the value from args or kwargs and validating it for (index, parameter) in self.parameters.iter().enumerate() { let mut pos_value = None; @@ -207,8 +211,17 @@ impl Validator for ArgumentsValidator { } } let mut kw_value = None; + let mut kw_lookup_key = None; + if matches!(parameter.mode.as_str(), "keyword_only" | "positional_or_keyword") { + kw_lookup_key = Some( + parameter + .lookup_key_collection + .select(validate_by_alias, validate_by_name)?, + ); + } + if let Some(kwargs) = args.kwargs() { - if let Some(ref lookup_key) = parameter.kw_lookup_key { + if let Some(lookup_key) = kw_lookup_key { if let Some((lookup_path, value)) = kwargs.get_item(lookup_key)? 
{ used_kwargs.insert(lookup_path.first_key()); kw_value = Some((lookup_path, value)); @@ -254,7 +267,7 @@ impl Validator for ArgumentsValidator { } else { output_args.push(value); } - } else if let Some(ref lookup_key) = parameter.kw_lookup_key { + } else if let Some(lookup_key) = kw_lookup_key { let error_type = if parameter.positional { ErrorTypeDefaults::MissingArgument } else { diff --git a/src/validators/dataclass.rs b/src/validators/dataclass.rs index a774c6c9a..69c8279ea 100644 --- a/src/validators/dataclass.rs +++ b/src/validators/dataclass.rs @@ -12,7 +12,7 @@ use crate::errors::{ErrorType, ErrorTypeDefaults, ValError, ValLineError, ValRes use crate::input::{ input_as_python_instance, Arguments, BorrowInput, Input, InputType, KeywordArgs, PositionalArgs, ValidationMatch, }; -use crate::lookup_key::LookupKey; +use crate::lookup_key::LookupKeyCollection; use crate::tools::SchemaDict; use crate::validators::function::convert_err; @@ -27,7 +27,7 @@ struct Field { py_name: Py, init: bool, init_only: bool, - lookup_key: LookupKey, + lookup_key_collection: LookupKeyCollection, validator: CombinedValidator, frozen: bool, } @@ -42,6 +42,8 @@ pub struct DataclassArgsValidator { extra_behavior: ExtraBehavior, extras_validator: Option>, loc_by_alias: bool, + validate_by_alias: Option, + validate_by_name: Option, } impl BuildValidator for DataclassArgsValidator { @@ -54,8 +56,6 @@ impl BuildValidator for DataclassArgsValidator { ) -> PyResult { let py = schema.py(); - let populate_by_name = schema_or_config_same(schema, config, intern!(py, "populate_by_name"))?.unwrap_or(false); - let extra_behavior = ExtraBehavior::from_schema_or_config(py, schema, config, ExtraBehavior::Ignore)?; let extras_validator = match (schema.get_item(intern!(py, "extras_schema"))?, &extra_behavior) { @@ -75,14 +75,6 @@ impl BuildValidator for DataclassArgsValidator { let py_name: Bound<'_, PyString> = field.get_as_req(intern!(py, "name"))?; let name: String = py_name.extract()?; - let lookup_key = match field.get_item(intern!(py, "validation_alias"))? { - Some(alias) => { - let alt_alias = if populate_by_name { Some(name.as_str()) } else { None }; - LookupKey::from_py(py, &alias, alt_alias)? 
- } - None => LookupKey::from_string(py, &name), - }; - let schema = field.get_as_req(intern!(py, "schema"))?; let validator = match build_validator(&schema, config, definitions) { @@ -101,11 +93,14 @@ impl BuildValidator for DataclassArgsValidator { positional_count += 1; } + let validation_alias = field.get_item(intern!(py, "validation_alias"))?; + let lookup_key_collection = LookupKeyCollection::new(py, validation_alias, name.as_str())?; + fields.push(Field { kw_only, name, py_name: py_name.into(), - lookup_key, + lookup_key_collection, validator, init: field.get_as(intern!(py, "init"))?.unwrap_or(true), init_only: field.get_as(intern!(py, "init_only"))?.unwrap_or(false), @@ -130,6 +125,8 @@ impl BuildValidator for DataclassArgsValidator { extra_behavior, extras_validator, loc_by_alias: config.get_as(intern!(py, "loc_by_alias"))?.unwrap_or(true), + validate_by_alias: config.get_as(intern!(py, "validate_by_alias"))?, + validate_by_name: config.get_as(intern!(py, "validate_by_name"))?, } .into()) } @@ -158,6 +155,10 @@ impl Validator for DataclassArgsValidator { let mut used_keys: AHashSet<&str> = AHashSet::with_capacity(self.fields.len()); let state = &mut state.rebind_extra(|extra| extra.data = Some(output_dict.clone())); + + let validate_by_alias = state.validate_by_alias_or(self.validate_by_alias); + let validate_by_name = state.validate_by_name_or(self.validate_by_name); + let mut fields_set_count: usize = 0; macro_rules! set_item { @@ -200,9 +201,13 @@ impl Validator for DataclassArgsValidator { } } + let lookup_key = field + .lookup_key_collection + .select(validate_by_alias, validate_by_name)?; + let mut kw_value = None; if let Some(kwargs) = args.kwargs() { - if let Some((lookup_path, value)) = kwargs.get_item(&field.lookup_key)? { + if let Some((lookup_path, value)) = kwargs.get_item(lookup_key)? 
{ used_keys.insert(lookup_path.first_key()); kw_value = Some((lookup_path, value)); } @@ -253,7 +258,7 @@ impl Validator for DataclassArgsValidator { } Ok(None) => { // This means there was no default value - errors.push(field.lookup_key.error( + errors.push(lookup_key.error( ErrorTypeDefaults::Missing, input, self.loc_by_alias, diff --git a/src/validators/generator.rs b/src/validators/generator.rs index 5e1b21c80..bc0cbd82a 100644 --- a/src/validators/generator.rs +++ b/src/validators/generator.rs @@ -279,6 +279,8 @@ impl InternalValidator { context: self.context.as_ref().map(|data| data.bind(py)), self_instance: self.self_instance.as_ref().map(|data| data.bind(py)), cache_str: self.cache_str, + by_alias: None, + by_name: None, }; let mut state = ValidationState::new(extra, &mut self.recursion_guard, false.into()); state.exactness = self.exactness; @@ -314,6 +316,8 @@ impl InternalValidator { context: self.context.as_ref().map(|data| data.bind(py)), self_instance: self.self_instance.as_ref().map(|data| data.bind(py)), cache_str: self.cache_str, + by_alias: None, + by_name: None, }; let mut state = ValidationState::new(extra, &mut self.recursion_guard, false.into()); state.exactness = self.exactness; diff --git a/src/validators/mod.rs b/src/validators/mod.rs index 75f39df29..4e40bf080 100644 --- a/src/validators/mod.rs +++ b/src/validators/mod.rs @@ -164,7 +164,7 @@ impl SchemaValidator { } #[allow(clippy::too_many_arguments)] - #[pyo3(signature = (input, *, strict=None, from_attributes=None, context=None, self_instance=None, allow_partial=PartialMode::Off))] + #[pyo3(signature = (input, *, strict=None, from_attributes=None, context=None, self_instance=None, allow_partial=PartialMode::Off, by_alias=None, by_name=None))] pub fn validate_python( &self, py: Python, @@ -174,6 +174,8 @@ impl SchemaValidator { context: Option<&Bound<'_, PyAny>>, self_instance: Option<&Bound<'_, PyAny>>, allow_partial: PartialMode, + by_alias: Option, + by_name: Option, ) -> PyResult { #[allow(clippy::used_underscore_items)] self._validate( @@ -185,11 +187,14 @@ impl SchemaValidator { context, self_instance, allow_partial, + by_alias, + by_name, ) .map_err(|e| self.prepare_validation_err(py, e, InputType::Python)) } - #[pyo3(signature = (input, *, strict=None, from_attributes=None, context=None, self_instance=None))] + #[allow(clippy::too_many_arguments)] + #[pyo3(signature = (input, *, strict=None, from_attributes=None, context=None, self_instance=None, by_alias=None, by_name=None))] pub fn isinstance_python( &self, py: Python, @@ -198,6 +203,8 @@ impl SchemaValidator { from_attributes: Option, context: Option<&Bound<'_, PyAny>>, self_instance: Option<&Bound<'_, PyAny>>, + by_alias: Option, + by_name: Option, ) -> PyResult { #[allow(clippy::used_underscore_items)] match self._validate( @@ -209,6 +216,8 @@ impl SchemaValidator { context, self_instance, false.into(), + by_alias, + by_name, ) { Ok(_) => Ok(true), Err(ValError::InternalErr(err)) => Err(err), @@ -218,7 +227,8 @@ impl SchemaValidator { } } - #[pyo3(signature = (input, *, strict=None, context=None, self_instance=None, allow_partial=PartialMode::Off))] + #[allow(clippy::too_many_arguments)] + #[pyo3(signature = (input, *, strict=None, context=None, self_instance=None, allow_partial=PartialMode::Off, by_alias=None, by_name=None))] pub fn validate_json( &self, py: Python, @@ -227,6 +237,8 @@ impl SchemaValidator { context: Option<&Bound<'_, PyAny>>, self_instance: Option<&Bound<'_, PyAny>>, allow_partial: PartialMode, + by_alias: Option, + by_name: 
Option<bool>, ) -> PyResult<PyObject> { let r = match json::validate_json_bytes(input) { #[allow(clippy::used_underscore_items)] @@ -238,13 +250,16 @@ impl SchemaValidator { context, self_instance, allow_partial, + by_alias, + by_name, ), Err(err) => Err(err), }; r.map_err(|e| self.prepare_validation_err(py, e, InputType::Json)) } - #[pyo3(signature = (input, *, strict=None, context=None, allow_partial=PartialMode::Off))] + #[allow(clippy::too_many_arguments)] + #[pyo3(signature = (input, *, strict=None, context=None, allow_partial=PartialMode::Off, by_alias=None, by_name=None))] pub fn validate_strings( &self, py: Python, @@ -252,19 +267,32 @@ impl SchemaValidator { strict: Option<bool>, context: Option<&Bound<'_, PyAny>>, allow_partial: PartialMode, + by_alias: Option<bool>, + by_name: Option<bool>, ) -> PyResult<PyObject> { let t = InputType::String; let string_mapping = StringMapping::new_value(input).map_err(|e| self.prepare_validation_err(py, e, t))?; #[allow(clippy::used_underscore_items)] - match self._validate(py, &string_mapping, t, strict, None, context, None, allow_partial) { + match self._validate( + py, + &string_mapping, + t, + strict, + None, + context, + None, + allow_partial, + by_alias, + by_name, + ) { Ok(r) => Ok(r), Err(e) => Err(self.prepare_validation_err(py, e, t)), } } #[allow(clippy::too_many_arguments)] - #[pyo3(signature = (obj, field_name, field_value, *, strict=None, from_attributes=None, context=None))] + #[pyo3(signature = (obj, field_name, field_value, *, strict=None, from_attributes=None, context=None, by_alias=None, by_name=None))] pub fn validate_assignment( &self, py: Python, @@ -274,6 +302,8 @@ impl SchemaValidator { strict: Option<bool>, from_attributes: Option<bool>, context: Option<&Bound<'_, PyAny>>, + by_alias: Option<bool>, + by_name: Option<bool>, ) -> PyResult<PyObject> { let extra = Extra { input_type: InputType::Python, @@ -283,6 +313,8 @@ impl SchemaValidator { context, self_instance: None, cache_str: self.cache_str, + by_alias, + by_name, }; let guard = &mut RecursionState::default(); @@ -307,6 +339,8 @@ impl SchemaValidator { context, self_instance: None, cache_str: self.cache_str, + by_alias: None, + by_name: None, }; let recursion_guard = &mut RecursionState::default(); let mut state = ValidationState::new(extra, recursion_guard, false.into()); @@ -356,6 +390,8 @@ impl SchemaValidator { context: Option<&Bound<'py, PyAny>>, self_instance: Option<&Bound<'py, PyAny>>, allow_partial: PartialMode, + by_alias: Option<bool>, + by_name: Option<bool>, ) -> ValResult<PyObject> { let mut recursion_guard = RecursionState::default(); let mut state = ValidationState::new( @@ -366,6 +402,8 @@ impl SchemaValidator { self_instance, input_type, self.cache_str, + by_alias, + by_name, ), &mut recursion_guard, allow_partial, @@ -383,6 +421,8 @@ impl SchemaValidator { context: Option<&Bound<'_, PyAny>>, self_instance: Option<&Bound<'_, PyAny>>, allow_partial: PartialMode, + by_alias: Option<bool>, + by_name: Option<bool>, ) -> ValResult<PyObject> { let json_value = jiter::JsonValue::parse_with_config(json_data, true, allow_partial) .map_err(|e| json::map_json_err(input, e, json_data))?; @@ -396,6 +436,8 @@ impl SchemaValidator { context, self_instance, allow_partial, + by_alias, + by_name, ) } @@ -432,7 +474,7 @@ impl<'py> SelfValidator<'py> { let py = schema.py(); let mut recursion_guard = RecursionState::default(); let mut state = ValidationState::new( - Extra::new(strict, None, None, None, InputType::Python, true.into()), + Extra::new(strict, None, None, None, InputType::Python, true.into(), None, None), &mut recursion_guard, false.into(), ); @@ -638,9 +680,14 @@ pub struct 
Extra<'a, 'py> { self_instance: Option<&'a Bound<'py, PyAny>>, /// Whether to use a cache of short strings to accelerate python string construction cache_str: StringCacheMode, + /// Whether to use the field's alias to match the input data to an attribute. + by_alias: Option<bool>, + /// Whether to use the field's name to match the input data to an attribute. + by_name: Option<bool>, } impl<'a, 'py> Extra<'a, 'py> { + #[allow(clippy::too_many_arguments)] pub fn new( strict: Option<bool>, from_attributes: Option<bool>, @@ -648,6 +695,8 @@ impl<'a, 'py> Extra<'a, 'py> { self_instance: Option<&'a Bound<'py, PyAny>>, input_type: InputType, cache_str: StringCacheMode, + by_alias: Option<bool>, + by_name: Option<bool>, ) -> Self { Extra { input_type, @@ -657,6 +706,8 @@ impl<'a, 'py> Extra<'a, 'py> { context, self_instance, cache_str, + by_alias, + by_name, } } } @@ -671,6 +722,8 @@ impl Extra<'_, '_> { context: self.context, self_instance: self.self_instance, cache_str: self.cache_str, + by_alias: self.by_alias, + by_name: self.by_name, } } } diff --git a/src/validators/model_fields.rs b/src/validators/model_fields.rs index 392760964..1ef684890 100644 --- a/src/validators/model_fields.rs +++ b/src/validators/model_fields.rs @@ -12,7 +12,7 @@ use crate::errors::LocItem; use crate::errors::{ErrorType, ErrorTypeDefaults, ValError, ValLineError, ValResult}; use crate::input::ConsumeIterator; use crate::input::{BorrowInput, Input, ValidatedDict, ValidationMatch}; -use crate::lookup_key::LookupKey; +use crate::lookup_key::LookupKeyCollection; use crate::tools::SchemaDict; use super::{build_validator, BuildValidator, CombinedValidator, DefinitionsBuilder, ValidationState, Validator}; @@ -20,7 +20,7 @@ use super::{build_validator, BuildValidator, CombinedValidator, DefinitionsBuild #[derive(Debug)] struct Field { name: String, - lookup_key: LookupKey, + lookup_key_collection: LookupKeyCollection, name_py: Py<PyString>, validator: CombinedValidator, frozen: bool, @@ -37,6 +37,8 @@ pub struct ModelFieldsValidator { strict: bool, from_attributes: bool, loc_by_alias: bool, + validate_by_alias: Option<bool>, + validate_by_name: Option<bool>, } impl BuildValidator for ModelFieldsValidator { @@ -48,10 +50,10 @@ impl BuildValidator for ModelFieldsValidator { definitions: &mut DefinitionsBuilder<CombinedValidator>, ) -> PyResult<CombinedValidator> { let py = schema.py(); + let strict = is_strict(schema, config)?; let from_attributes = schema_or_config_same(schema, config, intern!(py, "from_attributes"))?.unwrap_or(false); - let populate_by_name = schema_or_config_same(schema, config, intern!(py, "populate_by_name"))?.unwrap_or(false); let extra_behavior = ExtraBehavior::from_schema_or_config(py, schema, config, ExtraBehavior::Ignore)?; @@ -79,17 +81,12 @@ impl BuildValidator for ModelFieldsValidator { Err(err) => return py_schema_err!("Field \"{}\":\n {}", field_name, err), }; - let lookup_key = match field_info.get_item(intern!(py, "validation_alias"))? { - Some(alias) => { - let alt_alias = if populate_by_name { Some(field_name) } else { None }; - LookupKey::from_py(py, &alias, alt_alias)? 
- } - None => LookupKey::from_string(py, field_name), - }; + let validation_alias = field_info.get_item(intern!(py, "validation_alias"))?; + let lookup_key_collection = LookupKeyCollection::new(py, validation_alias, field_name)?; fields.push(Field { name: field_name.to_string(), - lookup_key, + lookup_key_collection, name_py: field_name_py.into(), validator, frozen: field_info.get_as::<bool>(intern!(py, "frozen"))?.unwrap_or(false), @@ -104,6 +101,8 @@ impl BuildValidator for ModelFieldsValidator { strict, from_attributes, loc_by_alias: config.get_as(intern!(py, "loc_by_alias"))?.unwrap_or(true), + validate_by_alias: config.get_as(intern!(py, "validate_by_alias"))?, + validate_by_name: config.get_as(intern!(py, "validate_by_name"))?, } .into()) } @@ -156,6 +155,9 @@ impl Validator for ModelFieldsValidator { let mut fields_set_vec: Vec<Py<PyString>> = Vec::with_capacity(self.fields.len()); let mut fields_set_count: usize = 0; + let validate_by_alias = state.validate_by_alias_or(self.validate_by_alias); + let validate_by_name = state.validate_by_name_or(self.validate_by_name); + // we only care about which keys have been used if we're iterating over the object for extra after // the first pass let mut used_keys: Option<AHashSet<&str>> = @@ -169,7 +171,10 @@ impl Validator for ModelFieldsValidator { let state = &mut state.rebind_extra(|extra| extra.data = Some(model_dict.clone())); for field in &self.fields { - let op_key_value = match dict.get_item(&field.lookup_key) { + let lookup_key = field + .lookup_key_collection + .select(validate_by_alias, validate_by_name)?; + let op_key_value = match dict.get_item(lookup_key) { Ok(v) => v, Err(ValError::LineErrors(line_errors)) => { for err in line_errors { @@ -209,7 +214,7 @@ impl Validator for ModelFieldsValidator { } Ok(None) => { // This means there was no default value - errors.push(field.lookup_key.error( + errors.push(lookup_key.error( ErrorTypeDefaults::Missing, input, self.loc_by_alias, diff --git a/src/validators/typed_dict.rs b/src/validators/typed_dict.rs index 3365c0169..999926762 100644 --- a/src/validators/typed_dict.rs +++ b/src/validators/typed_dict.rs @@ -3,14 +3,14 @@ use pyo3::prelude::*; use pyo3::types::{PyDict, PyString}; use crate::build_tools::py_schema_err; -use crate::build_tools::{is_strict, schema_or_config, schema_or_config_same, ExtraBehavior}; +use crate::build_tools::{is_strict, schema_or_config, ExtraBehavior}; use crate::errors::LocItem; use crate::errors::{ErrorTypeDefaults, ValError, ValLineError, ValResult}; use crate::input::BorrowInput; use crate::input::ConsumeIterator; use crate::input::ValidationMatch; use crate::input::{Input, ValidatedDict}; -use crate::lookup_key::LookupKey; +use crate::lookup_key::LookupKeyCollection; use crate::tools::SchemaDict; use ahash::AHashSet; use jiter::PartialMode; @@ -20,7 +20,7 @@ use super::{build_validator, BuildValidator, CombinedValidator, DefinitionsBuild #[derive(Debug)] struct TypedDictField { name: String, - lookup_key: LookupKey, + lookup_key_collection: LookupKeyCollection, name_py: Py<PyString>, required: bool, validator: CombinedValidator, @@ -35,6 +35,8 @@ pub struct TypedDictValidator { extras_validator: Option<Box<CombinedValidator>>, strict: bool, loc_by_alias: bool, + validate_by_alias: Option<bool>, + validate_by_name: Option<bool>, } impl BuildValidator for TypedDictValidator { @@ -55,7 +57,6 @@ impl BuildValidator for TypedDictValidator { let total = schema_or_config(schema, config, intern!(py, "total"), intern!(py, "typed_dict_total"))?.unwrap_or(true); - let populate_by_name = schema_or_config_same(schema, config, intern!(py, 
"populate_by_name"))?.unwrap_or(false); let extra_behavior = ExtraBehavior::from_schema_or_config(py, schema, config, ExtraBehavior::Ignore)?; @@ -108,17 +109,12 @@ impl BuildValidator for TypedDictValidator { } } - let lookup_key = match field_info.get_item(intern!(py, "validation_alias"))? { - Some(alias) => { - let alt_alias = if populate_by_name { Some(field_name) } else { None }; - LookupKey::from_py(py, &alias, alt_alias)? - } - None => LookupKey::from_string(py, field_name), - }; + let validation_alias = field_info.get_item(intern!(py, "validation_alias"))?; + let lookup_key_collection = LookupKeyCollection::new(py, validation_alias, field_name)?; fields.push(TypedDictField { name: field_name.to_string(), - lookup_key, + lookup_key_collection, name_py: field_name_py.into(), validator, required, @@ -130,6 +126,8 @@ impl BuildValidator for TypedDictValidator { extras_validator, strict, loc_by_alias: config.get_as(intern!(py, "loc_by_alias"))?.unwrap_or(true), + validate_by_alias: config.get_as(intern!(py, "validate_by_alias"))?, + validate_by_name: config.get_as(intern!(py, "validate_by_name"))?, } .into()) } @@ -160,6 +158,9 @@ impl Validator for TypedDictValidator { }; let allow_partial = state.allow_partial; + let validate_by_alias = state.validate_by_alias_or(self.validate_by_alias); + let validate_by_name = state.validate_by_name_or(self.validate_by_name); + // we only care about which keys have been used if we're iterating over the object for extra after // the first pass let mut used_keys: Option<AHashSet<&str>> = @@ -171,10 +172,14 @@ impl Validator for TypedDictValidator { { let state = &mut state.rebind_extra(|extra| extra.data = Some(output_dict.clone())); + let mut fields_set_count: usize = 0; for field in &self.fields { - let op_key_value = match dict.get_item(&field.lookup_key) { + let lookup_key = field + .lookup_key_collection + .select(validate_by_alias, validate_by_name)?; + let op_key_value = match dict.get_item(lookup_key) { Ok(v) => v, Err(ValError::LineErrors(line_errors)) => { let field_loc: LocItem = field.name.clone().into(); @@ -229,7 +234,7 @@ impl Validator for TypedDictValidator { Ok(None) => { // This means there was no default value if field.required { - errors.push(field.lookup_key.error( + errors.push(lookup_key.error( ErrorTypeDefaults::Missing, input, self.loc_by_alias, diff --git a/src/validators/validation_state.rs b/src/validators/validation_state.rs index 8ee41f5de..b5bf37f48 100644 --- a/src/validators/validation_state.rs +++ b/src/validators/validation_state.rs @@ -64,6 +64,14 @@ impl<'a, 'py> ValidationState<'a, 'py> { self.extra.strict.unwrap_or(default) } + pub fn validate_by_alias_or(&self, default: Option<bool>) -> bool { + self.extra.by_alias.or(default).unwrap_or(true) + } + + pub fn validate_by_name_or(&self, default: Option<bool>) -> bool { + self.extra.by_name.or(default).unwrap_or(false) + } + /// Sets the exactness to the lower of the current exactness /// and the given exactness. 
/// diff --git a/tests/serializers/test_dataclasses.py b/tests/serializers/test_dataclasses.py index d7fbcffaf..e14ea268e 100644 --- a/tests/serializers/test_dataclasses.py +++ b/tests/serializers/test_dataclasses.py @@ -84,9 +84,9 @@ def test_serialization_alias(): ['a', 'b'], ) s = SchemaSerializer(schema) - assert s.to_python(Foo(a='hello', b=b'more')) == IsStrictDict(a='hello', BAR=b'more') - assert s.to_python(Foo(a='hello', b=b'more'), mode='json') == IsStrictDict(a='hello', BAR='more') - j = s.to_json(Foo(a='hello', b=b'more')) + assert s.to_python(Foo(a='hello', b=b'more'), by_alias=True) == IsStrictDict(a='hello', BAR=b'more') + assert s.to_python(Foo(a='hello', b=b'more'), mode='json', by_alias=True) == IsStrictDict(a='hello', BAR='more') + j = s.to_json(Foo(a='hello', b=b'more'), by_alias=True) if on_pypy: assert json.loads(j) == {'a': 'hello', 'BAR': 'more'} @@ -235,3 +235,46 @@ class Bar: s = SchemaSerializer(schema) assert s.to_python(Foo(x=1), warnings='error') == {'x': 1} assert s.to_python(Foo(x=1, init_var=2), warnings='error') == {'x': 1} + + +@pytest.mark.parametrize( + 'config,runtime,expected', + [ + (True, True, {'my_alias': 'hello'}), + (True, False, {'my_field': 'hello'}), + (True, None, {'my_alias': 'hello'}), + (False, True, {'my_alias': 'hello'}), + (False, False, {'my_field': 'hello'}), + (False, None, {'my_field': 'hello'}), + (None, True, {'my_alias': 'hello'}), + (None, False, {'my_field': 'hello'}), + (None, None, {'my_field': 'hello'}), + ], +) +def test_by_alias_and_name_config_interaction(config, runtime, expected) -> None: + """This test reflects the priority that applies for config vs runtime serialization alias configuration. + + If the runtime value (by_alias) is set, that value is used. + If the runtime value is unset, the config value (serialize_by_alias) is used. + If neither are set, the default, False, is used. 
+ """ + + @dataclasses.dataclass + class Foo: + my_field: str + + schema = core_schema.dataclass_schema( + Foo, + core_schema.dataclass_args_schema( + 'Foo', + [ + core_schema.dataclass_field( + name='my_field', schema=core_schema.str_schema(), serialization_alias='my_alias' + ), + ], + ), + ['my_field'], + config=core_schema.CoreConfig(serialize_by_alias=config or False), + ) + s = SchemaSerializer(schema) + assert s.to_python(Foo(my_field='hello'), by_alias=runtime) == expected diff --git a/tests/serializers/test_functions.py b/tests/serializers/test_functions.py index a151b7454..738e8acc2 100644 --- a/tests/serializers/test_functions.py +++ b/tests/serializers/test_functions.py @@ -68,7 +68,7 @@ def double(value, info): # insert_assert(f_info) assert f_info == { 'mode': 'python', - 'by_alias': True, + 'by_alias': None, 'exclude_unset': False, 'exclude_defaults': False, 'exclude_none': False, @@ -77,7 +77,7 @@ def double(value, info): } assert s.to_python('x') == 'xx' - assert s.to_python(4, mode='foobar') == 8 + assert s.to_python(4, mode='foobar', by_alias=True) == 8 # insert_assert(f_info) assert f_info == { 'mode': 'foobar', @@ -93,7 +93,7 @@ def double(value, info): # insert_assert(f_info) assert f_info == { 'mode': 'json', - 'by_alias': True, + 'by_alias': None, 'exclude_unset': False, 'exclude_defaults': False, 'exclude_none': False, @@ -119,7 +119,7 @@ def double(value, info): 'include': {3, 2, 1}, 'exclude': {'foo': {'bar'}}, 'mode': 'python', - 'by_alias': True, + 'by_alias': None, 'exclude_unset': False, 'exclude_defaults': False, 'exclude_none': False, @@ -132,7 +132,7 @@ def double(value, info): assert f_info == { 'context': 'context', 'mode': 'python', - 'by_alias': True, + 'by_alias': None, 'exclude_unset': False, 'exclude_defaults': False, 'exclude_none': False, @@ -230,27 +230,27 @@ def append_args(value, info): ) ) assert s.to_python(123) == ( - "123 info=SerializationInfo(include=None, exclude=None, context=None, mode='python', by_alias=True, exclude_unset=False, " + "123 info=SerializationInfo(include=None, exclude=None, context=None, mode='python', by_alias=False, exclude_unset=False, " 'exclude_defaults=False, exclude_none=False, round_trip=False, serialize_as_any=False)' ) assert s.to_python(123, mode='other') == ( - "123 info=SerializationInfo(include=None, exclude=None, context=None, mode='other', by_alias=True, exclude_unset=False, " + "123 info=SerializationInfo(include=None, exclude=None, context=None, mode='other', by_alias=False, exclude_unset=False, " 'exclude_defaults=False, exclude_none=False, round_trip=False, serialize_as_any=False)' ) assert s.to_python(123, include={'x'}) == ( - "123 info=SerializationInfo(include={'x'}, exclude=None, context=None, mode='python', by_alias=True, exclude_unset=False, " + "123 info=SerializationInfo(include={'x'}, exclude=None, context=None, mode='python', by_alias=False, exclude_unset=False, " 'exclude_defaults=False, exclude_none=False, round_trip=False, serialize_as_any=False)' ) assert s.to_python(123, context='context') == ( - "123 info=SerializationInfo(include=None, exclude=None, context='context', mode='python', by_alias=True, exclude_unset=False, " + "123 info=SerializationInfo(include=None, exclude=None, context='context', mode='python', by_alias=False, exclude_unset=False, " 'exclude_defaults=False, exclude_none=False, round_trip=False, serialize_as_any=False)' ) assert s.to_python(123, mode='json', exclude={1: {2}}) == ( - "123 info=SerializationInfo(include=None, exclude={1: {2}}, context=None, mode='json', 
by_alias=True, exclude_unset=False, " + "123 info=SerializationInfo(include=None, exclude={1: {2}}, context=None, mode='json', by_alias=False, exclude_unset=False, " 'exclude_defaults=False, exclude_none=False, round_trip=False, serialize_as_any=False)' ) assert s.to_json(123) == ( - b"\"123 info=SerializationInfo(include=None, exclude=None, context=None, mode='json', by_alias=True, exclude_unset=False, " + b"\"123 info=SerializationInfo(include=None, exclude=None, context=None, mode='json', by_alias=False, exclude_unset=False, " b'exclude_defaults=False, exclude_none=False, round_trip=False, serialize_as_any=False)"' ) diff --git a/tests/serializers/test_model.py b/tests/serializers/test_model.py index cb0298622..8786359e8 100644 --- a/tests/serializers/test_model.py +++ b/tests/serializers/test_model.py @@ -217,7 +217,7 @@ def test_alias(): ) ) value = BasicModel(cat=0, dog=1, bird=2) - assert s.to_python(value) == IsStrictDict(Meow=0, Woof=1, bird=2) + assert s.to_python(value, by_alias=True) == IsStrictDict(Meow=0, Woof=1, bird=2) def test_model_wrong_warn(): @@ -700,9 +700,9 @@ def volume(self) -> int: ), ) ) - assert s.to_python(Model(3, 4)) == {'width': 3, 'height': 4, 'Area': 12, 'volume': 48} - assert s.to_python(Model(3, 4), mode='json') == {'width': 3, 'height': 4, 'Area': 12, 'volume': 48} - assert s.to_json(Model(3, 4)) == b'{"width":3,"height":4,"Area":12,"volume":48}' + assert s.to_python(Model(3, 4), by_alias=True) == {'width': 3, 'height': 4, 'Area': 12, 'volume': 48} + assert s.to_python(Model(3, 4), mode='json', by_alias=True) == {'width': 3, 'height': 4, 'Area': 12, 'volume': 48} + assert s.to_json(Model(3, 4), by_alias=True) == b'{"width":3,"height":4,"Area":12,"volume":48}' def test_computed_field_exclude_none(): @@ -734,17 +734,28 @@ def volume(self) -> None: ), ) ) - assert s.to_python(Model(3, 4), exclude_none=False) == {'width': 3, 'height': 4, 'Area': 12, 'volume': None} - assert s.to_python(Model(3, 4), exclude_none=True) == {'width': 3, 'height': 4, 'Area': 12} - assert s.to_python(Model(3, 4), mode='json', exclude_none=False) == { + assert s.to_python(Model(3, 4), exclude_none=False, by_alias=True) == { 'width': 3, 'height': 4, 'Area': 12, 'volume': None, } - assert s.to_python(Model(3, 4), mode='json', exclude_none=True) == {'width': 3, 'height': 4, 'Area': 12} - assert s.to_json(Model(3, 4), exclude_none=False) == b'{"width":3,"height":4,"Area":12,"volume":null}' - assert s.to_json(Model(3, 4), exclude_none=True) == b'{"width":3,"height":4,"Area":12}' + assert s.to_python(Model(3, 4), exclude_none=True, by_alias=True) == {'width': 3, 'height': 4, 'Area': 12} + assert s.to_python(Model(3, 4), mode='json', exclude_none=False, by_alias=True) == { + 'width': 3, + 'height': 4, + 'Area': 12, + 'volume': None, + } + assert s.to_python(Model(3, 4), mode='json', exclude_none=True, by_alias=True) == { + 'width': 3, + 'height': 4, + 'Area': 12, + } + assert ( + s.to_json(Model(3, 4), exclude_none=False, by_alias=True) == b'{"width":3,"height":4,"Area":12,"volume":null}' + ) + assert s.to_json(Model(3, 4), exclude_none=True, by_alias=True) == b'{"width":3,"height":4,"Area":12}' def test_computed_field_exclude_none_different_order(): @@ -779,17 +790,28 @@ def area(self) -> int: ), ) ) - assert s.to_python(Model(3, 4), exclude_none=False) == {'width': 3, 'height': 4, 'Area': 12, 'volume': None} - assert s.to_python(Model(3, 4), exclude_none=True) == {'width': 3, 'height': 4, 'Area': 12} - assert s.to_python(Model(3, 4), mode='json', exclude_none=False) == { + assert 
s.to_python(Model(3, 4), by_alias=True, exclude_none=False) == { 'width': 3, 'height': 4, 'Area': 12, 'volume': None, } - assert s.to_python(Model(3, 4), mode='json', exclude_none=True) == {'width': 3, 'height': 4, 'Area': 12} - assert s.to_json(Model(3, 4), exclude_none=False) == b'{"width":3,"height":4,"volume":null,"Area":12}' - assert s.to_json(Model(3, 4), exclude_none=True) == b'{"width":3,"height":4,"Area":12}' + assert s.to_python(Model(3, 4), by_alias=True, exclude_none=True) == {'width': 3, 'height': 4, 'Area': 12} + assert s.to_python(Model(3, 4), by_alias=True, mode='json', exclude_none=False) == { + 'width': 3, + 'height': 4, + 'Area': 12, + 'volume': None, + } + assert s.to_python(Model(3, 4), mode='json', by_alias=True, exclude_none=True) == { + 'width': 3, + 'height': 4, + 'Area': 12, + } + assert ( + s.to_json(Model(3, 4), exclude_none=False, by_alias=True) == b'{"width":3,"height":4,"volume":null,"Area":12}' + ) + assert s.to_json(Model(3, 4), exclude_none=True, by_alias=True) == b'{"width":3,"height":4,"Area":12}' @pytest.mark.skipif(cached_property is None, reason='cached_property is not available') @@ -1152,3 +1174,42 @@ class BModel(BasicModel): ... with pytest.warns(UserWarning, match='Expected 2 fields but got 1 for type `.*AModel` with value `.*`.+'): value = BasicModel(root=AModel(type='a')) s.to_python(value) + + +@pytest.mark.parametrize( + 'config,runtime,expected', + [ + (True, True, {'my_alias': 1}), + (True, False, {'my_field': 1}), + (True, None, {'my_alias': 1}), + (False, True, {'my_alias': 1}), + (False, False, {'my_field': 1}), + (False, None, {'my_field': 1}), + (None, True, {'my_alias': 1}), + (None, False, {'my_field': 1}), + (None, None, {'my_field': 1}), + ], +) +def test_by_alias_and_name_config_interaction(config, runtime, expected) -> None: + """This test reflects the priority that applies for config vs runtime serialization alias configuration. + + If the runtime value (by_alias) is set, that value is used. + If the runtime value is unset, the config value (serialize_by_alias) is used. + If neither are set, the default, False, is used. 
+ """ + + class Model: + def __init__(self, my_field: int) -> None: + self.my_field = my_field + + schema = core_schema.model_schema( + Model, + core_schema.model_fields_schema( + { + 'my_field': core_schema.model_field(core_schema.int_schema(), serialization_alias='my_alias'), + } + ), + config=core_schema.CoreConfig(serialize_by_alias=config or False), + ) + s = SchemaSerializer(schema) + assert s.to_python(Model(1), by_alias=runtime) == expected diff --git a/tests/serializers/test_typed_dict.py b/tests/serializers/test_typed_dict.py index 5fc9adcda..bd2016d18 100644 --- a/tests/serializers/test_typed_dict.py +++ b/tests/serializers/test_typed_dict.py @@ -114,16 +114,16 @@ def test_alias(): ) ) value = {'cat': 0, 'dog': 1, 'bird': 2} - assert s.to_python(value) == IsStrictDict(Meow=0, Woof=1, bird=2) - assert s.to_python(value, exclude={'dog'}) == IsStrictDict(Meow=0, bird=2) + assert s.to_python(value, by_alias=True) == IsStrictDict(Meow=0, Woof=1, bird=2) + assert s.to_python(value, exclude={'dog'}, by_alias=True) == IsStrictDict(Meow=0, bird=2) assert s.to_python(value, by_alias=False) == IsStrictDict(cat=0, dog=1, bird=2) - assert s.to_python(value, mode='json') == IsStrictDict(Meow=0, Woof=1, bird=2) - assert s.to_python(value, mode='json', include={'cat'}) == IsStrictDict(Meow=0) + assert s.to_python(value, mode='json', by_alias=True) == IsStrictDict(Meow=0, Woof=1, bird=2) + assert s.to_python(value, mode='json', include={'cat'}, by_alias=True) == IsStrictDict(Meow=0) assert s.to_python(value, mode='json', by_alias=False) == IsStrictDict(cat=0, dog=1, bird=2) - assert json.loads(s.to_json(value)) == IsStrictDict(Meow=0, Woof=1, bird=2) - assert json.loads(s.to_json(value, include={'cat', 'bird'})) == IsStrictDict(Meow=0, bird=2) + assert json.loads(s.to_json(value, by_alias=True)) == IsStrictDict(Meow=0, Woof=1, bird=2) + assert json.loads(s.to_json(value, include={'cat', 'bird'}, by_alias=True)) == IsStrictDict(Meow=0, bird=2) assert json.loads(s.to_json(value, by_alias=False)) == IsStrictDict(cat=0, dog=1, bird=2) @@ -333,3 +333,37 @@ def test_extra_custom_serializer(): m = {'extra': 'extra'} assert s.to_python(m) == {'extra': 'extra bam!'} + + +@pytest.mark.parametrize( + 'config,runtime,expected', + [ + (True, True, {'my_alias': 1}), + (True, False, {'my_field': 1}), + (True, None, {'my_alias': 1}), + (False, True, {'my_alias': 1}), + (False, False, {'my_field': 1}), + (False, None, {'my_field': 1}), + (None, True, {'my_alias': 1}), + (None, False, {'my_field': 1}), + (None, None, {'my_field': 1}), + ], +) +def test_by_alias_and_name_config_interaction(config, runtime, expected) -> None: + """This test reflects the priority that applies for config vs runtime serialization alias configuration. + + If the runtime value (by_alias) is set, that value is used. + If the runtime value is unset, the config value (serialize_by_alias) is used. + If neither are set, the default, False, is used. 
+ """ + + class Model(TypedDict): + my_field: int + + schema = core_schema.typed_dict_schema( + { + 'my_field': core_schema.typed_dict_field(core_schema.int_schema(), serialization_alias='my_alias'), + }, + ) + s = SchemaSerializer(schema, config=core_schema.CoreConfig(serialize_by_alias=config or False)) + assert s.to_python(Model(my_field=1), by_alias=runtime) == expected diff --git a/tests/serializers/test_union.py b/tests/serializers/test_union.py index 96bfb54f7..15b9bf2c0 100644 --- a/tests/serializers/test_union.py +++ b/tests/serializers/test_union.py @@ -760,8 +760,8 @@ class ModelB: model_a = ModelA(field=1) model_b = ModelB(field=1) - assert s.to_python(model_a) == {'field': 1, 'TAG': 'a'} - assert s.to_python(model_b) == {'field': 1, 'TAG': 'b'} + assert s.to_python(model_a, by_alias=True) == {'field': 1, 'TAG': 'a'} + assert s.to_python(model_b, by_alias=True) == {'field': 1, 'TAG': 'b'} def test_union_model_wrap_serializer(): diff --git a/tests/test.rs b/tests/test.rs index b59fbec54..58e2904f5 100644 --- a/tests/test.rs +++ b/tests/test.rs @@ -95,7 +95,7 @@ a = A() None, None, None, - true, + Some(true), false, false, false, @@ -136,7 +136,7 @@ json_input = '{"a": "something"}' let json_input = locals.get_item("json_input").unwrap().unwrap(); let binding = SchemaValidator::py_new(py, &schema, None) .unwrap() - .validate_json(py, &json_input, None, None, None, false.into()) + .validate_json(py, &json_input, None, None, None, false.into(), None, None) .unwrap(); let validation_result: Bound<'_, PyAny> = binding.extract(py).unwrap(); let repr = format!("{}", validation_result.repr().unwrap()); @@ -203,7 +203,7 @@ dump_json_input_2 = {'a': 'something'} None, None, None, - false, + Some(false), false, false, false, @@ -224,7 +224,7 @@ dump_json_input_2 = {'a': 'something'} None, None, None, - false, + Some(false), false, false, false, diff --git a/tests/test_json.py b/tests/test_json.py index 6b365a4cb..4d40ceb16 100644 --- a/tests/test_json.py +++ b/tests/test_json.py @@ -282,9 +282,9 @@ def __init__(self, my_foo: int, my_inners: list['Foobar']): Foobar.__pydantic_serializer__ = s instance = Foobar(my_foo=1, my_inners=[Foobar(my_foo=2, my_inners=[])]) - assert to_jsonable_python(instance) == {'myFoo': 1, 'myInners': [{'myFoo': 2, 'myInners': []}]} + assert to_jsonable_python(instance, by_alias=True) == {'myFoo': 1, 'myInners': [{'myFoo': 2, 'myInners': []}]} assert to_jsonable_python(instance, by_alias=False) == {'my_foo': 1, 'my_inners': [{'my_foo': 2, 'my_inners': []}]} - assert to_json(instance) == b'{"myFoo":1,"myInners":[{"myFoo":2,"myInners":[]}]}' + assert to_json(instance, by_alias=True) == b'{"myFoo":1,"myInners":[{"myFoo":2,"myInners":[]}]}' assert to_json(instance, by_alias=False) == b'{"my_foo":1,"my_inners":[{"my_foo":2,"my_inners":[]}]}' diff --git a/tests/test_typing.py b/tests/test_typing.py index 2f460fb48..20a50a368 100644 --- a/tests/test_typing.py +++ b/tests/test_typing.py @@ -232,7 +232,7 @@ def f(input: Any, info: core_schema.SerializationInfo, /) -> str: ) ) assert s.to_python(123) == ( - "SerializationInfo(include=None, exclude=None, context=None, mode='python', by_alias=True, exclude_unset=False, " + "SerializationInfo(include=None, exclude=None, context=None, mode='python', by_alias=False, exclude_unset=False, " 'exclude_defaults=False, exclude_none=False, round_trip=False, serialize_as_any=False)' ) @@ -253,7 +253,7 @@ def f( # insert_assert(s.to_python(123, mode='json')) assert s.to_python(123, mode='json') == ( 'SerializationCallable(serializer=str) 
' - "SerializationInfo(include=None, exclude=None, context=None, mode='json', by_alias=True, exclude_unset=False, " + "SerializationInfo(include=None, exclude=None, context=None, mode='json', by_alias=False, exclude_unset=False, " 'exclude_defaults=False, exclude_none=False, round_trip=False, serialize_as_any=False)' ) diff --git a/tests/validators/test_arguments.py b/tests/validators/test_arguments.py index 496363a1f..ec7c409bb 100644 --- a/tests/validators/test_arguments.py +++ b/tests/validators/test_arguments.py @@ -2,7 +2,7 @@ import sys from functools import wraps from inspect import Parameter, signature -from typing import Any, get_type_hints +from typing import Any, Union, get_type_hints import pytest @@ -883,14 +883,14 @@ def test_alias(py_and_json: PyAndJson, input_value, expected): ], ids=repr, ) -def test_alias_populate_by_name(py_and_json: PyAndJson, input_value, expected): +def test_alias_validate_by_name(py_and_json: PyAndJson, input_value, expected): v = py_and_json( { 'type': 'arguments', 'arguments_schema': [ {'name': 'a', 'mode': 'positional_or_keyword', 'schema': {'type': 'int'}, 'alias': 'Foo'} ], - 'populate_by_name': True, + 'validate_by_name': True, } ) if isinstance(expected, Err): @@ -900,6 +900,34 @@ def test_alias_populate_by_name(py_and_json: PyAndJson, input_value, expected): assert v.validate_test(input_value) == expected +def test_only_validate_by_name(py_and_json) -> None: + schema = core_schema.arguments_schema( + [ + core_schema.arguments_parameter(name='a', schema=core_schema.str_schema(), alias='FieldA'), + ], + validate_by_name=True, + validate_by_alias=False, + ) + v = py_and_json(schema) + assert v.validate_test(ArgsKwargs((), {'a': 'hello'})) == ((), {'a': 'hello'}) + with pytest.raises(ValidationError, match=r'a\n +Missing required argument \[type=missing_argument,'): + assert v.validate_test(ArgsKwargs((), {'FieldA': 'hello'})) + + +def test_only_allow_alias(py_and_json) -> None: + schema = core_schema.arguments_schema( + [ + core_schema.arguments_parameter(name='a', schema=core_schema.str_schema(), alias='FieldA'), + ], + validate_by_name=False, + validate_by_alias=True, + ) + v = py_and_json(schema) + assert v.validate_test(ArgsKwargs((), {'FieldA': 'hello'})) == ((), {'a': 'hello'}) + with pytest.raises(ValidationError, match=r'FieldA\n +Missing required argument \[type=missing_argument,'): + assert v.validate_test(ArgsKwargs((), {'a': 'hello'})) + + def validate(config=None): def decorator(function): parameters = signature(function).parameters @@ -1145,3 +1173,50 @@ def test_error_display(pydantic_version): '[{"type":"missing_argument","loc":["b"],"msg":"Missing required argument",' '"input":"ArgsKwargs((), {\'a\': 1})"}]' ) + + +@pytest.mark.parametrize('config_by_alias', [None, True, False]) +@pytest.mark.parametrize('config_by_name', [None, True, False]) +@pytest.mark.parametrize('runtime_by_alias', [None, True, False]) +@pytest.mark.parametrize('runtime_by_name', [None, True, False]) +def test_by_alias_and_name_config_interaction( + config_by_alias: Union[bool, None], + config_by_name: Union[bool, None], + runtime_by_alias: Union[bool, None], + runtime_by_name: Union[bool, None], +) -> None: + """This test reflects the priority that applies for config vs runtime validation alias configuration. + + Runtime values take precedence over config values, when set. + By default, by_alias is True and by_name is False. 
+ """ + + if config_by_alias is False and config_by_name is False and runtime_by_alias is False and runtime_by_name is False: + pytest.skip("Can't have both by_alias and by_name as effectively False") + + schema = core_schema.arguments_schema( + arguments=[ + core_schema.arguments_parameter(name='my_field', schema=core_schema.int_schema(), alias='my_alias'), + ], + **({'validate_by_alias': config_by_alias} if config_by_alias is not None else {}), + **({'validate_by_name': config_by_name} if config_by_name is not None else {}), + ) + s = SchemaValidator(schema) + + alias_allowed = next(x for x in (runtime_by_alias, config_by_alias, True) if x is not None) + name_allowed = next(x for x in (runtime_by_name, config_by_name, False) if x is not None) + + if alias_allowed: + assert s.validate_python( + ArgsKwargs((), {'my_alias': 1}), by_alias=runtime_by_alias, by_name=runtime_by_name + ) == ( + (), + {'my_field': 1}, + ) + if name_allowed: + assert s.validate_python( + ArgsKwargs((), {'my_field': 1}), by_alias=runtime_by_alias, by_name=runtime_by_name + ) == ( + (), + {'my_field': 1}, + ) diff --git a/tests/validators/test_dataclasses.py b/tests/validators/test_dataclasses.py index 09bd4e762..cfead4423 100644 --- a/tests/validators/test_dataclasses.py +++ b/tests/validators/test_dataclasses.py @@ -1708,3 +1708,125 @@ class Foo: assert exc_info.value.errors(include_url=False) == expected.errors else: assert dataclasses.asdict(v.validate_python(input_value)) == expected + + +@dataclasses.dataclass +class BasicDataclass: + a: str + + +def test_alias_allow_pop(py_and_json: PyAndJson): + schema = core_schema.dataclass_schema( + BasicDataclass, + core_schema.dataclass_args_schema( + 'BasicDataclass', + [ + core_schema.dataclass_field(name='a', schema=core_schema.str_schema(), validation_alias='FieldA'), + ], + ), + ['a'], + config=core_schema.CoreConfig(validate_by_name=True, validate_by_alias=True), + ) + v = py_and_json(schema) + assert v.validate_test({'FieldA': 'hello'}) == BasicDataclass(a='hello') + assert v.validate_test({'a': 'hello'}) == BasicDataclass(a='hello') + assert v.validate_test( + { + 'FieldA': 'hello', + 'a': 'world', + } + ) == BasicDataclass(a='hello') + with pytest.raises(ValidationError, match=r'FieldA\n +Field required \[type=missing,'): + assert v.validate_test({'foobar': 'hello'}) + + +def test_only_validate_by_name(py_and_json) -> None: + schema = core_schema.dataclass_schema( + BasicDataclass, + core_schema.dataclass_args_schema( + 'BasicDataclass', + [ + core_schema.dataclass_field(name='a', schema=core_schema.str_schema(), validation_alias='FieldA'), + ], + ), + ['a'], + config=core_schema.CoreConfig(validate_by_name=True, validate_by_alias=False), + ) + v = py_and_json(schema) + assert v.validate_test({'a': 'hello'}) == BasicDataclass(a='hello') + with pytest.raises(ValidationError, match=r'a\n +Field required \[type=missing,'): + assert v.validate_test({'FieldA': 'hello'}) + + +def test_only_allow_alias(py_and_json) -> None: + schema = core_schema.dataclass_schema( + BasicDataclass, + core_schema.dataclass_args_schema( + 'BasicDataclass', + [ + core_schema.dataclass_field(name='a', schema=core_schema.str_schema(), validation_alias='FieldA'), + ], + ), + ['a'], + config=core_schema.CoreConfig(validate_by_name=False, validate_by_alias=True), + ) + v = py_and_json(schema) + assert v.validate_test({'FieldA': 'hello'}) == BasicDataclass(a='hello') + with pytest.raises(ValidationError, match=r'FieldA\n +Field required \[type=missing,'): + assert v.validate_test({'a': 
'hello'}) + + +@pytest.mark.parametrize('config_by_alias', [None, True, False]) +@pytest.mark.parametrize('config_by_name', [None, True, False]) +@pytest.mark.parametrize('runtime_by_alias', [None, True, False]) +@pytest.mark.parametrize('runtime_by_name', [None, True, False]) +def test_by_alias_and_name_config_interaction( + config_by_alias: Union[bool, None], + config_by_name: Union[bool, None], + runtime_by_alias: Union[bool, None], + runtime_by_name: Union[bool, None], +) -> None: + """This test reflects the priority that applies for config vs runtime validation alias configuration. + + Runtime values take precedence over config values, when set. + By default, by_alias is True and by_name is False. + """ + + if config_by_alias is False and config_by_name is False and runtime_by_alias is False and runtime_by_name is False: + pytest.skip("Can't have both by_alias and by_name as effectively False") + + core_config = { + **({'validate_by_alias': config_by_alias} if config_by_alias is not None else {}), + **({'validate_by_name': config_by_name} if config_by_name is not None else {}), + } + + @dataclasses.dataclass + class MyDataclass: + my_field: int + + schema = core_schema.dataclass_schema( + MyDataclass, + core_schema.dataclass_args_schema( + 'MyDataclass', + [ + core_schema.dataclass_field( + name='my_field', schema=core_schema.int_schema(), validation_alias='my_alias' + ), + ], + ), + ['my_field'], + config=core_schema.CoreConfig(**core_config), + ) + s = SchemaValidator(schema) + + alias_allowed = next(x for x in (runtime_by_alias, config_by_alias, True) if x is not None) + name_allowed = next(x for x in (runtime_by_name, config_by_name, False) if x is not None) + + if alias_allowed: + assert dataclasses.asdict( + s.validate_python({'my_alias': 1}, by_alias=runtime_by_alias, by_name=runtime_by_name) + ) == {'my_field': 1} + if name_allowed: + assert dataclasses.asdict( + s.validate_python({'my_field': 1}, by_alias=runtime_by_alias, by_name=runtime_by_name) + ) == {'my_field': 1} diff --git a/tests/validators/test_model_fields.py b/tests/validators/test_model_fields.py index e5dd53994..22b8620a8 100644 --- a/tests/validators/test_model_fields.py +++ b/tests/validators/test_model_fields.py @@ -506,9 +506,9 @@ def test_alias_allow_pop(py_and_json: PyAndJson): v = py_and_json( { 'type': 'model-fields', - 'populate_by_name': True, 'fields': {'field_a': {'validation_alias': 'FieldA', 'type': 'model-field', 'schema': {'type': 'int'}}}, - } + }, + config=CoreConfig(validate_by_name=True), ) assert v.validate_test({'FieldA': '123'}) == ({'field_a': 123}, None, {'field_a'}) assert v.validate_test({'field_a': '123'}) == ({'field_a': 123}, None, {'field_a'}) @@ -517,6 +517,32 @@ def test_alias_allow_pop(py_and_json: PyAndJson): assert v.validate_test({'foobar': '123'}) +def test_only_validate_by_name(py_and_json) -> None: + v = py_and_json( + { + 'type': 'model-fields', + 'fields': {'field_a': {'validation_alias': 'FieldA', 'type': 'model-field', 'schema': {'type': 'int'}}}, + }, + config=CoreConfig(validate_by_name=True, validate_by_alias=False), + ) + assert v.validate_test({'field_a': '123'}) == ({'field_a': 123}, None, {'field_a'}) + with pytest.raises(ValidationError, match=r'field_a\n +Field required \[type=missing,'): + assert v.validate_test({'FieldA': '123'}) + + +def test_only_allow_alias(py_and_json) -> None: + v = py_and_json( + { + 'type': 'model-fields', + 'fields': {'field_a': {'validation_alias': 'FieldA', 'type': 'model-field', 'schema': {'type': 'int'}}}, + }, + 
config=CoreConfig(validate_by_name=False, validate_by_alias=True), + ) + assert v.validate_test({'FieldA': '123'}) == ({'field_a': 123}, None, {'field_a'}) + with pytest.raises(ValidationError, match=r'FieldA\n +Field required \[type=missing,'): + assert v.validate_test({'field_a': '123'}) + + @pytest.mark.parametrize( 'input_value,expected', [ @@ -697,8 +723,8 @@ def test_paths_allow_by_name(py_and_json: PyAndJson, input_value): 'schema': {'type': 'int'}, } }, - 'populate_by_name': True, - } + }, + config=CoreConfig(validate_by_name=True), ) assert v.validate_test(input_value) == ({'field_a': 42}, None, {'field_a'}) @@ -985,8 +1011,8 @@ def test_from_attributes_by_name(): core_schema.model_fields_schema( fields={'a': core_schema.model_field(schema=core_schema.int_schema(), validation_alias='a_alias')}, from_attributes=True, - populate_by_name=True, - ) + ), + config=CoreConfig(validate_by_name=True), ) assert v.validate_python(Cls(a_alias=1)) == ({'a': 1}, None, {'a'}) assert v.validate_python(Cls(a=1)) == ({'a': 1}, None, {'a'}) @@ -1383,9 +1409,9 @@ def test_alias_extra_by_name(py_and_json: PyAndJson): 'type': 'model-fields', 'extra_behavior': 'allow', 'from_attributes': True, - 'populate_by_name': True, 'fields': {'field_a': {'validation_alias': 'FieldA', 'type': 'model-field', 'schema': {'type': 'int'}}}, - } + }, + config=CoreConfig(validate_by_name=True), ) assert v.validate_test({'FieldA': 1}) == ({'field_a': 1}, {}, {'field_a'}) assert v.validate_test({'field_a': 1}) == ({'field_a': 1}, {}, {'field_a'}) @@ -1730,3 +1756,51 @@ def test_extra_behavior_ignore(config: Union[core_schema.CoreConfig, None], sche } ] assert 'not_f' not in m + + +@pytest.mark.parametrize('config_by_alias', [None, True, False]) +@pytest.mark.parametrize('config_by_name', [None, True, False]) +@pytest.mark.parametrize('runtime_by_alias', [None, True, False]) +@pytest.mark.parametrize('runtime_by_name', [None, True, False]) +def test_by_alias_and_name_config_interaction( + config_by_alias: Union[bool, None], + config_by_name: Union[bool, None], + runtime_by_alias: Union[bool, None], + runtime_by_name: Union[bool, None], +) -> None: + """This test reflects the priority that applies for config vs runtime validation alias configuration. + + Runtime values take precedence over config values, when set. + By default, by_alias is True and by_name is False. 
+ """ + + if config_by_alias is False and config_by_name is False and runtime_by_alias is False and runtime_by_name is False: + pytest.skip("Can't have both by_alias and by_name as effectively False") + + class Model: + def __init__(self, my_field: int) -> None: + self.my_field = my_field + + core_config = { + **({'validate_by_alias': config_by_alias} if config_by_alias is not None else {}), + **({'validate_by_name': config_by_name} if config_by_name is not None else {}), + } + + schema = core_schema.model_schema( + Model, + core_schema.model_fields_schema( + { + 'my_field': core_schema.model_field(core_schema.int_schema(), validation_alias='my_alias'), + } + ), + config=core_schema.CoreConfig(**core_config), + ) + s = SchemaValidator(schema) + + alias_allowed = next(x for x in (runtime_by_alias, config_by_alias, True) if x is not None) + name_allowed = next(x for x in (runtime_by_name, config_by_name, False) if x is not None) + + if alias_allowed: + assert s.validate_python({'my_alias': 1}, by_alias=runtime_by_alias, by_name=runtime_by_name).my_field == 1 + if name_allowed: + assert s.validate_python({'my_field': 1}, by_alias=runtime_by_alias, by_name=runtime_by_name).my_field == 1 diff --git a/tests/validators/test_typed_dict.py b/tests/validators/test_typed_dict.py index 6e3826a3b..b224b65ea 100644 --- a/tests/validators/test_typed_dict.py +++ b/tests/validators/test_typed_dict.py @@ -393,11 +393,11 @@ def test_alias_allow_pop(py_and_json: PyAndJson): v = py_and_json( { 'type': 'typed-dict', - 'populate_by_name': True, 'fields': { 'field_a': {'validation_alias': 'FieldA', 'type': 'typed-dict-field', 'schema': {'type': 'int'}} }, - } + 'config': {'validate_by_name': True, 'validate_by_alias': True}, + }, ) assert v.validate_test({'FieldA': '123'}) == {'field_a': 123} assert v.validate_test({'field_a': '123'}) == {'field_a': 123} @@ -406,6 +406,36 @@ def test_alias_allow_pop(py_and_json: PyAndJson): assert v.validate_test({'foobar': '123'}) +def test_only_validate_by_name(py_and_json) -> None: + v = py_and_json( + { + 'type': 'typed-dict', + 'fields': { + 'field_a': {'validation_alias': 'FieldA', 'type': 'typed-dict-field', 'schema': {'type': 'int'}} + }, + 'config': {'validate_by_name': True, 'validate_by_alias': False}, + } + ) + assert v.validate_test({'field_a': '123'}) == {'field_a': 123} + with pytest.raises(ValidationError, match=r'field_a\n +Field required \[type=missing,'): + assert v.validate_test({'FieldA': '123'}) + + +def test_only_allow_alias(py_and_json) -> None: + v = py_and_json( + { + 'type': 'typed-dict', + 'fields': { + 'field_a': {'validation_alias': 'FieldA', 'type': 'typed-dict-field', 'schema': {'type': 'int'}} + }, + 'config': {'validate_by_name': False, 'validate_by_alias': True}, + } + ) + assert v.validate_test({'FieldA': '123'}) == {'field_a': 123} + with pytest.raises(ValidationError, match=r'FieldA\n +Field required \[type=missing,'): + assert v.validate_test({'field_a': '123'}) + + @pytest.mark.parametrize( 'input_value,expected', [ @@ -590,8 +620,8 @@ def test_paths_allow_by_name(py_and_json: PyAndJson, input_value): 'schema': {'type': 'int'}, } }, - 'populate_by_name': True, - } + 'config': {'validate_by_name': True}, + }, ) assert v.validate_test(input_value) == {'field_a': 42} @@ -795,11 +825,11 @@ def test_alias_extra_by_name(py_and_json: PyAndJson): { 'type': 'typed-dict', 'extra_behavior': 'allow', - 'populate_by_name': True, 'fields': { 'field_a': {'validation_alias': 'FieldA', 'type': 'typed-dict-field', 'schema': {'type': 'int'}} }, - } + 
'config': {'validate_by_name': True}, + }, ) assert v.validate_test({'FieldA': 1}) == {'field_a': 1} assert v.validate_test({'field_a': 1}) == {'field_a': 1} @@ -1165,3 +1195,44 @@ def validate(v, info): del cycle assert_gc(lambda: ref() is None) + + +@pytest.mark.parametrize('config_by_alias', [None, True, False]) +@pytest.mark.parametrize('config_by_name', [None, True, False]) +@pytest.mark.parametrize('runtime_by_alias', [None, True, False]) +@pytest.mark.parametrize('runtime_by_name', [None, True, False]) +def test_by_alias_and_name_config_interaction( + config_by_alias: Union[bool, None], + config_by_name: Union[bool, None], + runtime_by_alias: Union[bool, None], + runtime_by_name: Union[bool, None], +) -> None: + """This test reflects the priority that applies for config vs runtime validation alias configuration. + + Runtime values take precedence over config values, when set. + By default, by_alias is True and by_name is False. + """ + + if config_by_alias is False and config_by_name is False and runtime_by_alias is False and runtime_by_name is False: + pytest.skip("Can't have both by_alias and by_name as effectively False") + + core_config = { + **({'validate_by_alias': config_by_alias} if config_by_alias is not None else {}), + **({'validate_by_name': config_by_name} if config_by_name is not None else {}), + } + + schema = core_schema.typed_dict_schema( + fields={ + 'my_field': core_schema.typed_dict_field(schema=core_schema.int_schema(), validation_alias='my_alias'), + }, + config=core_schema.CoreConfig(**core_config), + ) + s = SchemaValidator(schema) + + alias_allowed = next(x for x in (runtime_by_alias, config_by_alias, True) if x is not None) + name_allowed = next(x for x in (runtime_by_name, config_by_name, False) if x is not None) + + if alias_allowed: + assert s.validate_python({'my_alias': 1}, by_alias=runtime_by_alias, by_name=runtime_by_name) == {'my_field': 1} + if name_allowed: + assert s.validate_python({'my_field': 1}, by_alias=runtime_by_alias, by_name=runtime_by_name) == {'my_field': 1}
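
Editorial illustration (not part of the patch): a minimal sketch of how the new runtime by_alias / by_name arguments interact with the validate_by_alias / validate_by_name config values exercised by the tests above. Runtime values, when given, take precedence over the config; otherwise the config applies; otherwise the defaults (by_alias=True, by_name=False) apply. The schema and field names below are hypothetical, chosen to mirror the tests.

from pydantic_core import SchemaValidator, core_schema

# A model-fields schema with a single aliased field (hypothetical names).
schema = core_schema.model_fields_schema(
    {'my_field': core_schema.model_field(core_schema.int_schema(), validation_alias='my_alias')},
)
# Config only allows lookup by field name.
v = SchemaValidator(schema, config=core_schema.CoreConfig(validate_by_name=True, validate_by_alias=False))

# With no runtime flags, the config applies: the name matches, the alias does not.
assert v.validate_python({'my_field': 1}) == ({'my_field': 1}, None, {'my_field'})

# Runtime by_alias=True overrides the config, so the alias matches again.
assert v.validate_python({'my_alias': 1}, by_alias=True) == ({'my_field': 1}, None, {'my_field'})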