@@ -22,7 +22,7 @@ def binary_cross_entropy(
22
22
23
23
>>> true_labels = np.array([0, 1, 1, 0, 1])
24
24
>>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8])
25
- >>> binary_cross_entropy(true_labels, predicted_probs)
25
+ >>> float(binary_cross_entropy(true_labels, predicted_probs))
26
26
0.2529995012327421
27
27
>>> true_labels = np.array([0, 1, 1, 0, 1])
28
28
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
@@ -420,7 +420,7 @@ def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> fl
420
420
421
421
>>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
422
422
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
423
- >>> mean_squared_logarithmic_error(true_values, predicted_values)
423
+ >>> float(mean_squared_logarithmic_error(true_values, predicted_values))
424
424
0.0030860877925181344
425
425
>>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
426
426
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
@@ -459,17 +459,17 @@ def mean_absolute_percentage_error(
459
459
Examples:
460
460
>>> y_true = np.array([10, 20, 30, 40])
461
461
>>> y_pred = np.array([12, 18, 33, 45])
462
- >>> mean_absolute_percentage_error(y_true, y_pred)
462
+ >>> float(mean_absolute_percentage_error(y_true, y_pred))
463
463
0.13125
464
464
465
465
>>> y_true = np.array([1, 2, 3, 4])
466
466
>>> y_pred = np.array([2, 3, 4, 5])
467
- >>> mean_absolute_percentage_error(y_true, y_pred)
467
+ >>> float(mean_absolute_percentage_error(y_true, y_pred))
468
468
0.5208333333333333
469
469
470
470
>>> y_true = np.array([34, 37, 44, 47, 48, 48, 46, 43, 32, 27, 26, 24])
471
471
>>> y_pred = np.array([37, 40, 46, 44, 46, 50, 45, 44, 34, 30, 22, 23])
472
- >>> mean_absolute_percentage_error(y_true, y_pred)
472
+ >>> float(mean_absolute_percentage_error(y_true, y_pred))
473
473
0.064671076436071
474
474
"""
475
475
if len(y_true) != len(y_pred):
@@ -511,7 +511,7 @@ def perplexity_loss(
511
511
... [[0.03, 0.26, 0.21, 0.18, 0.30],
512
512
... [0.28, 0.10, 0.33, 0.15, 0.12]]]
513
513
... )
514
- >>> perplexity_loss(y_true, y_pred)
514
+ >>> float(perplexity_loss(y_true, y_pred))
515
515
5.0247347775367945
516
516
>>> y_true = np.array([[1, 4], [2, 3]])
517
517
>>> y_pred = np.array(
0 commit comments