Commit d0e5fb6

Handling scalars in tests with NEP 51 numpy
1 parent: 31c424f

32 files changed (+102 -100 lines)
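The pattern behind every hunk below is the same: under NEP 51 (adopted in NumPy 2.0), the repr of a NumPy scalar includes its type, so values that used to print as plain Python numbers in doctests now print as e.g. np.float64(...). A minimal sketch of the behaviour and of the fixes this commit applies, assuming NumPy >= 2.0:

>>> import numpy as np
>>> np.float64(3.0)                  # NEP 51 repr now shows the scalar type
np.float64(3.0)
>>> float(np.float64(3.0))           # wrapping in float() gives a plain Python float back
3.0
>>> np.float64(3.0).item()           # .item() converts any NumPy scalar the same way
3.0
>>> np.array([1.5, 2.5]).tolist()    # .tolist() yields plain Python scalars element-wise
[1.5, 2.5]

Accordingly, scalar returns are wrapped in float()/int(), doctest expressions call .item(), and list-returning functions go through .tolist().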

computer_vision/haralick_descriptors.py  (+15 -13)

@@ -19,7 +19,7 @@ def root_mean_square_error(original: np.ndarray, reference: np.ndarray) -> float
     >>> root_mean_square_error(np.array([1, 2, 3]), np.array([6, 4, 2]))
     3.1622776601683795
     """
-    return np.sqrt(((original - reference) ** 2).mean())
+    return float(np.sqrt(((original - reference) ** 2).mean()))


 def normalize_image(
@@ -298,16 +298,18 @@ def haralick_descriptors(matrix: np.ndarray) -> list[float]:

     # Sum values for descriptors ranging from the first one to the last,
     # as all are their respective origin matrix and not the resulting value yet.
-    return [
-        maximum_prob,
-        correlation.sum(),
-        energy.sum(),
-        contrast.sum(),
-        dissimilarity.sum(),
-        inverse_difference.sum(),
-        homogeneity.sum(),
-        entropy.sum(),
-    ]
+    return np.array(
+        [
+            maximum_prob,
+            correlation.sum(),
+            energy.sum(),
+            contrast.sum(),
+            dissimilarity.sum(),
+            inverse_difference.sum(),
+            homogeneity.sum(),
+            entropy.sum(),
+        ]
+    ).tolist()


 def get_descriptors(
@@ -335,7 +337,7 @@ def get_descriptors(
     return np.concatenate(descriptors, axis=None)


-def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> np.float32:
+def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> float:
     """
     Simple method for calculating the euclidean distance between two points,
     with type np.ndarray.
@@ -346,7 +348,7 @@ def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> np.float32:
     >>> euclidean(a, b)
     3.3166247903554
     """
-    return np.sqrt(np.sum(np.square(point_1 - point_2)))
+    return float(np.sqrt(np.sum(np.square(point_1 - point_2))))


 def get_distances(descriptors: np.ndarray, base: int) -> list[tuple[int, float]]:

data_structures/heap/binomial_heap.py  (+4 -4)

@@ -273,7 +273,7 @@ def delete_min(self):
             # Update min_node
             self.min_node = None

-            return min_value
+            return int(min_value)
         # No right subtree corner case
         # The structure of the tree implies that this should be the bottom root
         # and there is at least one other root
@@ -292,7 +292,7 @@ def delete_min(self):
                 if i.val < self.min_node.val:
                     self.min_node = i
                 i = i.parent
-            return min_value
+            return int(min_value)
         # General case
         # Find the BinomialHeap of the right subtree of min_node
         bottom_of_new = self.min_node.right
@@ -312,7 +312,7 @@ def delete_min(self):
             self.bottom_root = bottom_of_new
             self.min_node = min_of_new
             # print("Single root, multiple nodes case")
-            return min_value
+            return int(min_value)
         # Remaining cases
         # Construct heap of right subtree
         new_heap = BinomialHeap(
@@ -354,7 +354,7 @@ def delete_min(self):
         # Merge heaps
         self.merge_heaps(new_heap)

-        return min_value
+        return int(min_value)

     def pre_order(self):
         """

electronics/circular_convolution.py  (+1 -1)

@@ -91,7 +91,7 @@ def circular_convolution(self) -> list[float]:
         final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

         # rounding-off to two decimal places
-        return [round(i, 2) for i in final_signal]
+        return np.array([round(i, 2) for i in final_signal]).tolist()


 if __name__ == "__main__":

fractals/julia_sets.py  (+6 -6)

@@ -40,11 +40,11 @@
 def eval_exponential(c_parameter: complex, z_values: np.ndarray) -> np.ndarray:
     """
     Evaluate $e^z + c$.
-    >>> eval_exponential(0, 0)
+    >>> eval_exponential(0, 0).item()
     1.0
-    >>> abs(eval_exponential(1, np.pi*1.j)) < 1e-15
+    >>> (abs(eval_exponential(1, np.pi*1.j)) < 1e-15).item()
     True
-    >>> abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15
+    >>> (abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15).item()
     True
     """
     return np.exp(z_values) + c_parameter
@@ -101,17 +101,17 @@ def iterate_function(
     >>> np.round(iterate_function(eval_quadratic_polynomial,
     ... 0,
     ... 3,
-    ... np.array([0,1,2]))[0])
+    ... np.array([0,1,2]))[0]).item()
     0j
     >>> np.round(iterate_function(eval_quadratic_polynomial,
     ... 0,
     ... 3,
-    ... np.array([0,1,2]))[1])
+    ... np.array([0,1,2]))[1]).item()
     (1+0j)
     >>> np.round(iterate_function(eval_quadratic_polynomial,
     ... 0,
     ... 3,
-    ... np.array([0,1,2]))[2])
+    ... np.array([0,1,2]))[2]).item()
     (256+0j)
     """

graphics/bezier_curve.py  (+1 -1)

@@ -44,7 +44,7 @@ def basis_function(self, t: float) -> list[float]:
             )
         # the basis must sum up to 1 for it to produce a valid Bezier curve.
         assert round(sum(output_values), 5) == 1
-        return output_values
+        return [float(i) for i in output_values]

     def bezier_curve_function(self, t: float) -> tuple[float, float]:
         """

graphs/dijkstra_binary_grid.py  (+2 -2)

@@ -69,7 +69,7 @@ def dijkstra(
                 x, y = predecessors[x, y]
             path.append(source)  # add the source manually
             path.reverse()
-            return matrix[destination], path
+            return float(matrix[destination]), path

         for i in range(len(dx)):
             nx, ny = x + dx[i], y + dy[i]
@@ -80,7 +80,7 @@ def dijkstra(
                 matrix[nx, ny] = dist + 1
                 predecessors[nx, ny] = (x, y)

-    return np.inf, []
+    return float(np.inf), []


 if __name__ == "__main__":

linear_algebra/src/power_iteration.py  (+1 -1)

@@ -78,7 +78,7 @@ def power_iteration(
     if is_complex:
         lambda_ = np.real(lambda_)

-    return lambda_, vector
+    return float(lambda_), vector


 def test_power_iteration() -> None:

linear_programming/simplex.py  (+3 -3)

@@ -144,7 +144,7 @@ def find_pivot(self) -> tuple[Any, Any]:
         # Arg of minimum quotient excluding the nan values. n_stages is added
         # to compensate for earlier exclusion of objective columns
         row_idx = np.nanargmin(quotients) + self.n_stages
-        return row_idx, col_idx
+        return row_idx.item(), col_idx.item()

     def pivot(self, row_idx: int, col_idx: int) -> np.ndarray:
         """Pivots on value on the intersection of pivot row and column.
@@ -315,7 +315,7 @@ def interpret_tableau(self) -> dict[str, float]:
         {'P': 5.0, 'x1': 1.0, 'x2': 1.0}
         """
         # P = RHS of final tableau
-        output_dict = {"P": abs(self.tableau[0, -1])}
+        output_dict = {"P": float(abs(self.tableau[0, -1]))}

         for i in range(self.n_vars):
             # Gives indices of nonzero entries in the ith column
@@ -329,7 +329,7 @@ def interpret_tableau(self) -> dict[str, float]:
             # If there is only one nonzero value in column, which is one
             if n_nonzero == 1 and nonzero_val == 1:
                 rhs_val = self.tableau[nonzero_rowidx, -1]
-                output_dict[self.col_titles[i]] = rhs_val
+                output_dict[self.col_titles[i]] = float(rhs_val)
         return output_dict


machine_learning/decision_tree.py  (+1 -1)

@@ -40,7 +40,7 @@ def mean_squared_error(self, labels, prediction):
         if labels.ndim != 1:
             print("Error: Input labels must be one dimensional")

-        return np.mean((labels - prediction) ** 2)
+        return np.mean((labels - prediction) ** 2).item()

     def train(self, x, y):
         """

machine_learning/forecasting/run.py  (+4 -4)

@@ -34,7 +34,7 @@ def linear_regression_prediction(
     x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
     y = np.array(train_usr)
     beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
-    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
+    return float(abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2]))


 def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
@@ -56,7 +56,7 @@ def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
     )
     model_fit = model.fit(disp=False, maxiter=600, method="nm")
     result = model_fit.predict(1, len(test_match), exog=[test_match])
-    return result[0]
+    return float(result[0])


 def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
@@ -75,7 +75,7 @@ def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
     regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
     regressor.fit(x_train, train_user)
     y_pred = regressor.predict(x_test)
-    return y_pred[0]
+    return float(y_pred[0])


 def interquartile_range_checker(train_user: list) -> float:
@@ -92,7 +92,7 @@ def interquartile_range_checker(train_user: list) -> float:
     q3 = np.percentile(train_user, 75)
     iqr = q3 - q1
     low_lim = q1 - (iqr * 0.1)
-    return low_lim
+    return float(low_lim)


 def data_safety_checker(list_vote: list, actual_result: float) -> bool:

machine_learning/k_nearest_neighbours.py  (+1 -1)

@@ -42,7 +42,7 @@ def _euclidean_distance(a: np.ndarray[float], b: np.ndarray[float]) -> float:
         >>> KNN._euclidean_distance(np.array([1, 2, 3]), np.array([1, 8, 11]))
         10.0
         """
-        return np.linalg.norm(a - b)
+        return float(np.linalg.norm(a - b))

     def classify(self, pred_point: np.ndarray[float], k: int = 5) -> str:
         """

machine_learning/logistic_regression.py  (+2 -2)

@@ -45,7 +45,7 @@ def sigmoid_function(z: float | np.ndarray) -> float | np.ndarray:
     @returns: returns value in the range 0 to 1

     Examples:
-    >>> sigmoid_function(4)
+    >>> float(sigmoid_function(4))
     0.9820137900379085
     >>> sigmoid_function(np.array([-3, 3]))
     array([0.04742587, 0.95257413])
@@ -100,7 +100,7 @@ def cost_function(h: np.ndarray, y: np.ndarray) -> float:
     References:
         - https://en.wikipedia.org/wiki/Logistic_regression
     """
-    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
+    return float((-y * np.log(h) - (1 - y) * np.log(1 - h)).mean())


 def log_likelihood(x, y, weights):

machine_learning/loss_functions.py  (+15 -15)

@@ -36,7 +36,7 @@ def binary_cross_entropy(

     y_pred = np.clip(y_pred, epsilon, 1 - epsilon)  # Clip predictions to avoid log(0)
     bce_loss = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))
-    return np.mean(bce_loss)
+    return float(np.mean(bce_loss))


 def binary_focal_cross_entropy(
@@ -87,7 +87,7 @@ def binary_focal_cross_entropy(
         + (1 - alpha) * y_pred**gamma * (1 - y_true) * np.log(1 - y_pred)
     )

-    return np.mean(bcfe_loss)
+    return float(np.mean(bcfe_loss))


 def categorical_cross_entropy(
@@ -145,7 +145,7 @@ def categorical_cross_entropy(
         raise ValueError("Predicted probabilities must sum to approximately 1.")

     y_pred = np.clip(y_pred, epsilon, 1)  # Clip predictions to avoid log(0)
-    return -np.sum(y_true * np.log(y_pred))
+    return float(-np.sum(y_true * np.log(y_pred)))


 def categorical_focal_cross_entropy(
@@ -247,7 +247,7 @@ def categorical_focal_cross_entropy(
         alpha * np.power(1 - y_pred, gamma) * y_true * np.log(y_pred), axis=1
     )

-    return np.mean(cfce_loss)
+    return float(np.mean(cfce_loss))


 def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float:
@@ -287,7 +287,7 @@ def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float:
         raise ValueError("y_true can have values -1 or 1 only.")

     hinge_losses = np.maximum(0, 1.0 - (y_true * y_pred))
-    return np.mean(hinge_losses)
+    return float(np.mean(hinge_losses))


 def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float:
@@ -309,11 +309,11 @@ def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float:

     >>> true_values = np.array([0.9, 10.0, 2.0, 1.0, 5.2])
     >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
-    >>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102)
+    >>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102).item()
     True
     >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0, 5.0])
     >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0])
-    >>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164)
+    >>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164).item()
     True
     >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0])
     >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0])
@@ -347,7 +347,7 @@ def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:

     >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
     >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
-    >>> np.isclose(mean_squared_error(true_values, predicted_values), 0.028)
+    >>> np.isclose(mean_squared_error(true_values, predicted_values), 0.028).item()
     True
     >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
     >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
@@ -381,11 +381,11 @@ def mean_absolute_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:

     >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
     >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
-    >>> np.isclose(mean_absolute_error(true_values, predicted_values), 0.16)
+    >>> np.isclose(mean_absolute_error(true_values, predicted_values), 0.16).item()
     True
     >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
     >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
-    >>> np.isclose(mean_absolute_error(true_values, predicted_values), 2.16)
+    >>> np.isclose(mean_absolute_error(true_values, predicted_values), 2.16).item()
     False
     >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
     >>> predicted_probs = np.array([0.3, 0.8, 0.9, 5.2])
@@ -433,7 +433,7 @@ def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
         raise ValueError("Input arrays must have the same length.")

     squared_logarithmic_errors = (np.log1p(y_true) - np.log1p(y_pred)) ** 2
-    return np.mean(squared_logarithmic_errors)
+    return float(np.mean(squared_logarithmic_errors))


 def mean_absolute_percentage_error(
@@ -478,7 +478,7 @@ def mean_absolute_percentage_error(
     y_true = np.where(y_true == 0, epsilon, y_true)
     absolute_percentage_diff = np.abs((y_true - y_pred) / y_true)

-    return np.mean(absolute_percentage_diff)
+    return float(np.mean(absolute_percentage_diff))


 def perplexity_loss(
@@ -570,7 +570,7 @@ def perplexity_loss(
     # Calculating perplexity for each sentence
     perp_losses = np.exp(np.negative(np.mean(np.log(true_class_pred), axis=1)))

-    return np.mean(perp_losses)
+    return float(np.mean(perp_losses))


 def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) -> float:
@@ -626,7 +626,7 @@ def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) -> float:

     diff = np.abs(y_true - y_pred)
     loss = np.where(diff < beta, 0.5 * diff**2 / beta, diff - 0.5 * beta)
-    return np.mean(loss)
+    return float(np.mean(loss))


 def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float:
@@ -660,7 +660,7 @@ def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float:
         raise ValueError("Input arrays must have the same length.")

     kl_loss = y_true * np.log(y_true / y_pred)
-    return np.sum(kl_loss)
+    return float(np.sum(kl_loss))


 if __name__ == "__main__":
