
Commit 93545d6

Add unit test files test/functorch/test_ops.py and test/test_unary_ufuncs.py
1 parent ebd04c2

File tree

9 files changed: +6344 -304 lines

test/xpu/functorch/common_utils.py

Lines changed: 640 additions & 0 deletions
Large diffs are not rendered by default.

test/xpu/functorch/functorch_additional_op_db.py

Lines changed: 814 additions & 0 deletions
Large diffs are not rendered by default.

test/xpu/functorch/test_ops_functorch_xpu.py

Lines changed: 3032 additions & 0 deletions
Large diffs are not rendered by default.

test/xpu/skip_list_common.py

Lines changed: 2 additions & 52 deletions
@@ -677,58 +677,7 @@
         # Accumulate error due to different accumulation order.
         "test_logcumsumexp_complex_xpu_complex64",
     ),
-    "test_unary_ufuncs_xpu.py": (
-        # AssertionError: Jiterator is only supported on CUDA and ROCm GPUs, none are available.
-        "_jiterator_",
-        # For extreme value processing, Numpy and XPU results are inconsistent
-        # std operations get different behavior on std::complex operarands for extremal cases
-        "test_reference_numerics_extremal__refs_log_xpu_complex64",
-        "test_reference_numerics_extremal_log_xpu_complex64",
-        "test_reference_numerics_extremal__refs_acos_xpu_complex64",
-        "test_reference_numerics_extremal__refs_acosh_xpu_complex64",
-        "test_reference_numerics_extremal_acos_xpu_complex64",
-        "test_reference_numerics_extremal_acosh_xpu_complex64",
-        "test_reference_numerics_extremal__refs_asinh_xpu_complex64",
-        "test_reference_numerics_extremal_asinh_xpu_complex64",
-        "test_reference_numerics_extremal__refs_asin_xpu_complex64",
-        "test_reference_numerics_extremal_asin_xpu_complex64",
-        "test_reference_numerics_large__refs_acosh_xpu_complex64",
-        "test_reference_numerics_large_acosh_xpu_complex64",
-        "test_reference_numerics_extremal__refs_log10_xpu_complex64",
-        "test_reference_numerics_extremal__refs_log1p_xpu_complex64",
-        "test_reference_numerics_extremal_log10_xpu_complex64",
-        "test_reference_numerics_extremal_log1p_xpu_complex64",
-        "test_reference_numerics_large__refs_asinh_xpu_complex128",
-        "test_reference_numerics_large__refs_asinh_xpu_complex64",
-        "test_reference_numerics_large__refs_asinh_xpu_complex32",
-        "test_reference_numerics_large_asinh_xpu_complex128",
-        "test_reference_numerics_large_asinh_xpu_complex64",
-        "test_reference_numerics_large_asinh_xpu_complex32",
-        # AssertionError: Tensor-likes are not close!
-        # exceeded maximum allowed difference
-        # Greatest absolute difference: 6.266784475883469e-05 at index (463, 204) (up to 1e-05 allowed)
-        # Greatest relative difference: 1.9145216356264427e-05 at index (463, 204) (up to 1.3e-06 allowed)
-        "test_reference_numerics_normal__refs_asinh_xpu_complex64",
-        "test_reference_numerics_normal_asinh_xpu_complex64",
-        # Unexpected success: CUDA uses thrust::sqrt and has accuracy issue. XPU use std::sqrt and has no issue.
-        "test_reference_numerics_large_rsqrt_xpu_complex32",
-        # Numeric difference
-        # https://github.com/intel/torch-xpu-ops/issues/544
-        # Expected 0.00497517 but got 0.00497520063072443.
-        # Absolute difference: 3.063072442997111e-08 (up to 0.0 allowed)
-        # Relative difference: 6.156719153309558e-06 (up to 1e-06 allowed)
-        "test_log1p_complex_xpu_complex64",
-        # Issue: https://github.com/intel/torch-xpu-ops/issues/622
-        # Mismatched elements: 8 / 943593 (0.0%)
-        # Greatest absolute difference: inf at index (9, 860) (up to 0.001 allowed)
-        # Greatest relative difference: inf at index (9, 860) (up to 0.0012 allowed)
-        "test_reference_numerics_normal_polygamma_polygamma_n_1_xpu_float16",
-        "test_reference_numerics_normal_polygamma_polygamma_n_2_xpu_float16",
-        "test_reference_numerics_normal_polygamma_polygamma_n_3_xpu_float16",
-        "test_reference_numerics_normal_polygamma_polygamma_n_4_xpu_float16",
-        # CUDA XFAIL
-        "test_reference_numerics_large__refs_rsqrt_xpu_complex32",
-    ),
+    "test_unary_ufuncs_xpu.py": None,
     "test_masked_xpu.py": None,
     "test_view_ops_xpu.py": (
         # Need quantization support, NotImplementedError: Could not run 'aten::_empty_affine_quantized' with arguments from the 'QuantizedXPU' backend.
@@ -1924,4 +1873,5 @@
         "test_sparse_matmul_xpu_float64", # - RuntimeError: Double and complex datatype matmul is not supported in oneDNN
         "test_sparse_mm_xpu_float64", # - NotImplementedError: Could not run 'aten::addmm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or wa...
     ),
+    "functorch/test_ops_functorch_xpu.py": None,
 }
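
Each key in skip_dict names a test file and maps either to a tuple of test-name substrings to skip or to None, which this commit now uses for test_unary_ufuncs_xpu.py and functorch/test_ops_functorch_xpu.py to mean the whole file runs with no skips. The sketch below is illustrative only: build_skip_expression and the "example_test_xpu.py" entry are hypothetical, and the actual torch-xpu-ops runner may consume the dict differently.

from typing import Optional, Tuple

def build_skip_expression(skip_cases: Optional[Tuple[str, ...]]) -> str:
    """Build a pytest -k expression that deselects every listed case.

    A value of None (as this commit now uses for test_unary_ufuncs_xpu.py)
    means no cases are skipped and the whole file runs.
    """
    if not skip_cases:
        return ""
    # Substring entries such as "_jiterator_" exclude every parametrized
    # case whose id contains them.
    return " and ".join(f"not {case}" for case in skip_cases)

skip_dict = {
    "test_unary_ufuncs_xpu.py": None,             # run everything (after this commit)
    "functorch/test_ops_functorch_xpu.py": None,  # run everything
    # Hypothetical entry, shown only to exercise the tuple branch.
    "example_test_xpu.py": ("test_case_a_xpu_float32", "_jiterator_"),
}

for test_file, cases in skip_dict.items():
    expr = build_skip_expression(cases)
    cmd = ["pytest", test_file] + (["-k", expr] if expr else [])
    print(cmd)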

test/xpu/skip_list_win.py

Lines changed: 2 additions & 43 deletions
@@ -25,47 +25,6 @@
         "test_copy_from_dlpack_xpu_uint8",
     ),
     # SYCL compiler issue where host and device results differ for math ops with complex dtypes
-    "test_unary_ufuncs_xpu.py": (
-        "test_reference_numerics_extremal__refs_atanh_xpu_complex128",
-        "test_reference_numerics_extremal__refs_atanh_xpu_complex64",
-        "test_reference_numerics_extremal__refs_nn_functional_tanhshrink_xpu_complex128",
-        "test_reference_numerics_extremal__refs_sin_xpu_complex128",
-        "test_reference_numerics_extremal__refs_sin_xpu_complex64",
-        "test_reference_numerics_extremal__refs_sinh_xpu_complex128",
-        "test_reference_numerics_extremal__refs_sinh_xpu_complex64",
-        "test_reference_numerics_extremal__refs_tan_xpu_complex128",
-        "test_reference_numerics_extremal__refs_tan_xpu_complex64",
-        "test_reference_numerics_extremal_atanh_xpu_complex128",
-        "test_reference_numerics_extremal_atanh_xpu_complex64",
-        "test_reference_numerics_extremal_nn_functional_tanhshrink_xpu_complex128",
-        "test_reference_numerics_extremal_sin_xpu_complex128",
-        "test_reference_numerics_extremal_sin_xpu_complex64",
-        "test_reference_numerics_extremal_sinh_xpu_complex128",
-        "test_reference_numerics_extremal_sinh_xpu_complex64",
-        "test_reference_numerics_extremal_square_xpu_complex128",
-        "test_reference_numerics_extremal_square_xpu_complex64",
-        "test_reference_numerics_extremal_tan_xpu_complex128",
-        "test_reference_numerics_extremal_tan_xpu_complex64",
-        "test_reference_numerics_large__refs_cos_xpu_complex128",
-        "test_reference_numerics_large__refs_cos_xpu_complex32",
-        "test_reference_numerics_large__refs_cos_xpu_complex64",
-        "test_reference_numerics_large__refs_cosh_xpu_complex32",
-        "test_reference_numerics_large__refs_exp_xpu_complex128",
-        "test_reference_numerics_large__refs_exp_xpu_complex32",
-        "test_reference_numerics_large__refs_exp_xpu_complex64",
-        "test_reference_numerics_large__refs_sin_xpu_complex128",
-        "test_reference_numerics_large__refs_sin_xpu_complex32",
-        "test_reference_numerics_large__refs_sin_xpu_complex64",
-        "test_reference_numerics_large__refs_sinh_xpu_complex32",
-        "test_reference_numerics_large__refs_tan_xpu_complex32",
-        "test_reference_numerics_large_cos_xpu_complex128",
-        "test_reference_numerics_large_cos_xpu_complex32",
-        "test_reference_numerics_large_cos_xpu_complex64",
-        "test_reference_numerics_large_exp_xpu_complex128",
-        "test_reference_numerics_large_exp_xpu_complex64",
-        "test_reference_numerics_large_sin_xpu_complex128",
-        "test_reference_numerics_large_sin_xpu_complex32",
-        "test_reference_numerics_large_sin_xpu_complex64",
-        "test_reference_numerics_small_acos_xpu_complex32",
-    ),
+    "test_unary_ufuncs_xpu.py": None,
+    "functorch/test_ops_functorch_xpu.py": None,
 }

test/xpu/skip_list_win_arc.py

Lines changed: 2 additions & 144 deletions
@@ -32,148 +32,6 @@
         "test_non_contig_pow_xpu_complex64",
     ),
     "test_nn_xpu.py": ("test_adaptiveavg_pool1d_shmem_xpu",),
-    "test_unary_ufuncs_xpu.py": (
-        "test_batch_vs_slicing__refs_acos_xpu_complex64",
-        "test_batch_vs_slicing__refs_acosh_xpu_complex64",
-        "test_batch_vs_slicing__refs_log_xpu_complex64",
-        "test_batch_vs_slicing__refs_sqrt_xpu_complex64",
-        "test_batch_vs_slicing_acos_xpu_complex32",
-        "test_batch_vs_slicing_acos_xpu_complex64",
-        "test_batch_vs_slicing_acosh_xpu_complex32",
-        "test_batch_vs_slicing_acosh_xpu_complex64",
-        "test_batch_vs_slicing_log_xpu_complex32",
-        "test_batch_vs_slicing_log_xpu_complex64",
-        "test_batch_vs_slicing_sqrt_xpu_complex32",
-        "test_batch_vs_slicing_sqrt_xpu_complex64",
-        "test_batch_vs_slicing_square_xpu_complex64",
-        "test_contig_size1__refs_acos_xpu_complex64",
-        "test_contig_size1__refs_acosh_xpu_complex64",
-        "test_contig_size1__refs_log_xpu_complex64",
-        "test_contig_size1__refs_sqrt_xpu_complex64",
-        "test_contig_size1_acos_xpu_complex32",
-        "test_contig_size1_acos_xpu_complex64",
-        "test_contig_size1_acosh_xpu_complex32",
-        "test_contig_size1_acosh_xpu_complex64",
-        "test_contig_size1_large_dim__refs_acos_xpu_complex64",
-        "test_contig_size1_large_dim__refs_acosh_xpu_complex64",
-        "test_contig_size1_large_dim__refs_log_xpu_complex64",
-        "test_contig_size1_large_dim__refs_sqrt_xpu_complex64",
-        "test_contig_size1_large_dim_acos_xpu_complex32",
-        "test_contig_size1_large_dim_acos_xpu_complex64",
-        "test_contig_size1_large_dim_acosh_xpu_complex32",
-        "test_contig_size1_large_dim_acosh_xpu_complex64",
-        "test_contig_size1_large_dim_log_xpu_complex32",
-        "test_contig_size1_large_dim_log_xpu_complex64",
-        "test_contig_size1_large_dim_sqrt_xpu_complex32",
-        "test_contig_size1_large_dim_sqrt_xpu_complex64",
-        "test_contig_size1_large_dim_square_xpu_complex64",
-        "test_contig_size1_log_xpu_complex32",
-        "test_contig_size1_log_xpu_complex64",
-        "test_contig_size1_sqrt_xpu_complex32",
-        "test_contig_size1_sqrt_xpu_complex64",
-        "test_contig_size1_square_xpu_complex64",
-        "test_contig_vs_every_other__refs_acos_xpu_complex64",
-        "test_contig_vs_every_other__refs_acosh_xpu_complex64",
-        "test_contig_vs_every_other__refs_log_xpu_complex64",
-        "test_contig_vs_every_other__refs_sqrt_xpu_complex64",
-        "test_contig_vs_every_other_acos_xpu_complex32",
-        "test_contig_vs_every_other_acos_xpu_complex64",
-        "test_contig_vs_every_other_acosh_xpu_complex32",
-        "test_contig_vs_every_other_acosh_xpu_complex64",
-        "test_contig_vs_every_other_log_xpu_complex32",
-        "test_contig_vs_every_other_log_xpu_complex64",
-        "test_contig_vs_every_other_sqrt_xpu_complex32",
-        "test_contig_vs_every_other_sqrt_xpu_complex64",
-        "test_contig_vs_every_other_square_xpu_complex64",
-        "test_contig_vs_transposed__refs_acos_xpu_complex64",
-        "test_contig_vs_transposed__refs_acosh_xpu_complex64",
-        "test_contig_vs_transposed__refs_log_xpu_complex64",
-        "test_contig_vs_transposed__refs_sqrt_xpu_complex64",
-        "test_contig_vs_transposed_acos_xpu_complex32",
-        "test_contig_vs_transposed_acos_xpu_complex64",
-        "test_contig_vs_transposed_acosh_xpu_complex32",
-        "test_contig_vs_transposed_acosh_xpu_complex64",
-        "test_contig_vs_transposed_log_xpu_complex32",
-        "test_contig_vs_transposed_log_xpu_complex64",
-        "test_contig_vs_transposed_sqrt_xpu_complex32",
-        "test_contig_vs_transposed_sqrt_xpu_complex64",
-        "test_contig_vs_transposed_square_xpu_complex64",
-        "test_non_contig__refs_acos_xpu_complex64",
-        "test_non_contig__refs_acosh_xpu_complex64",
-        "test_non_contig__refs_log_xpu_complex64",
-        "test_non_contig__refs_sqrt_xpu_complex64",
-        "test_non_contig_acos_xpu_complex32",
-        "test_non_contig_acos_xpu_complex64",
-        "test_non_contig_acosh_xpu_complex32",
-        "test_non_contig_acosh_xpu_complex64",
-        "test_non_contig_expand__refs_acos_xpu_complex64",
-        "test_non_contig_expand__refs_acosh_xpu_complex64",
-        "test_non_contig_expand__refs_log_xpu_complex64",
-        "test_non_contig_expand__refs_sqrt_xpu_complex64",
-        "test_non_contig_expand_acos_xpu_complex32",
-        "test_non_contig_expand_acos_xpu_complex64",
-        "test_non_contig_expand_acosh_xpu_complex32",
-        "test_non_contig_expand_acosh_xpu_complex64",
-        "test_non_contig_expand_log_xpu_complex32",
-        "test_non_contig_expand_log_xpu_complex64",
-        "test_non_contig_expand_sqrt_xpu_complex32",
-        "test_non_contig_expand_sqrt_xpu_complex64",
-        "test_non_contig_expand_square_xpu_complex64",
-        "test_non_contig_index__refs_acos_xpu_complex64",
-        "test_non_contig_index__refs_acosh_xpu_complex64",
-        "test_non_contig_index__refs_log_xpu_complex64",
-        "test_non_contig_index__refs_sqrt_xpu_complex64",
-        "test_non_contig_index_acos_xpu_complex32",
-        "test_non_contig_index_acos_xpu_complex64",
-        "test_non_contig_index_acosh_xpu_complex32",
-        "test_non_contig_index_acosh_xpu_complex64",
-        "test_non_contig_index_log_xpu_complex32",
-        "test_non_contig_index_log_xpu_complex64",
-        "test_non_contig_index_sqrt_xpu_complex32",
-        "test_non_contig_index_sqrt_xpu_complex64",
-        "test_non_contig_index_square_xpu_complex64",
-        "test_non_contig_log_xpu_complex32",
-        "test_non_contig_log_xpu_complex64",
-        "test_non_contig_sqrt_xpu_complex32",
-        "test_non_contig_sqrt_xpu_complex64",
-        "test_non_contig_square_xpu_complex64",
-        "test_reference_numerics_extremal__refs_sqrt_xpu_complex64",
-        "test_reference_numerics_extremal_sqrt_xpu_complex64",
-        "test_reference_numerics_large__refs_acos_xpu_complex64",
-        "test_reference_numerics_large__refs_log_xpu_complex64",
-        "test_reference_numerics_large__refs_sqrt_xpu_complex64",
-        "test_reference_numerics_large_acos_xpu_complex32",
-        "test_reference_numerics_large_acos_xpu_complex64",
-        "test_reference_numerics_large_acosh_xpu_complex32",
-        "test_reference_numerics_large_log_xpu_complex32",
-        "test_reference_numerics_large_log_xpu_complex64",
-        "test_reference_numerics_large_sqrt_xpu_complex32",
-        "test_reference_numerics_large_sqrt_xpu_complex64",
-        "test_reference_numerics_normal__refs_acos_xpu_complex64",
-        "test_reference_numerics_normal__refs_acosh_xpu_complex64",
-        "test_reference_numerics_normal__refs_log_xpu_complex64",
-        "test_reference_numerics_normal__refs_sqrt_xpu_complex64",
-        "test_reference_numerics_normal_acos_xpu_complex32",
-        "test_reference_numerics_normal_acos_xpu_complex64",
-        "test_reference_numerics_normal_acosh_xpu_complex32",
-        "test_reference_numerics_normal_acosh_xpu_complex64",
-        "test_reference_numerics_normal_log_xpu_complex32",
-        "test_reference_numerics_normal_log_xpu_complex64",
-        "test_reference_numerics_normal_sqrt_xpu_complex32",
-        "test_reference_numerics_normal_sqrt_xpu_complex64",
-        "test_reference_numerics_normal_square_xpu_complex64",
-        "test_reference_numerics_small__refs_acos_xpu_complex64",
-        "test_reference_numerics_small__refs_acosh_xpu_complex64",
-        "test_reference_numerics_small__refs_log_xpu_complex64",
-        "test_reference_numerics_small__refs_sqrt_xpu_complex64",
-        "test_reference_numerics_small_acos_xpu_complex32",
-        "test_reference_numerics_small_acos_xpu_complex64",
-        "test_reference_numerics_small_acosh_xpu_complex32",
-        "test_reference_numerics_small_acosh_xpu_complex64",
-        "test_reference_numerics_small_log_xpu_complex32",
-        "test_reference_numerics_small_log_xpu_complex64",
-        "test_reference_numerics_small_sqrt_xpu_complex32",
-        "test_reference_numerics_small_sqrt_xpu_complex64",
-        "test_reference_numerics_small_square_xpu_complex64",
-    ),
+    "test_unary_ufuncs_xpu.py": None,
+    "functorch/test_ops_functorch_xpu.py": None,
 }

test/xpu/skip_list_win_bmg.py

Lines changed: 2 additions & 26 deletions
@@ -1,31 +1,6 @@
 skip_dict = {
     # tensor(0.-0.j, device='xpu:0', dtype=torch.complex32) tensor(nan+nanj, device='xpu:0', dtype=torch.complex32) (1.5707964+0j)
-    "test_unary_ufuncs_xpu.pyy": (
-        "test_reference_numerics_small_acos_xpu_complex32",
-        "test_reference_numerics_small_asin_xpu_complex32",
-        "test_reference_numerics_small_asinh_xpu_complex32",
-        "test_reference_numerics_small_atan_xpu_complex32",
-        "test_reference_numerics_small_atanh_xpu_complex32",
-        # Need to check compiler std::sin() on inf+infj
-        "test_reference_numerics_extremal__refs_sin_xpu_complex128",
-        "test_reference_numerics_extremal__refs_sin_xpu_complex64",
-        "test_reference_numerics_extremal_nn_functional_tanhshrink_xpu_complex128",
-        "test_reference_numerics_extremal_nn_functional_tanhshrink_xpu_complex64",
-        "test_reference_numerics_extremal_sin_xpu_complex128",
-        "test_reference_numerics_extremal_sin_xpu_complex64",
-        "test_reference_numerics_extremal_sinh_xpu_complex128",
-        "test_reference_numerics_extremal_sinh_xpu_complex64",
-        "test_reference_numerics_large__refs_sin_xpu_complex32",
-        "test_reference_numerics_large_sin_xpu_complex32",
-        # Known issue of exp accuracy
-        # tensor(13437.7000-501.j, device='xpu:0', dtype=torch.complex128) tensor(inf+infj, device='xpu:0', dtype=torch.complex128) (-inf+infj)
-        "test_reference_numerics_large__refs_exp_xpu_complex128",
-        "test_reference_numerics_large_exp_xpu_complex128",
-        "test_reference_numerics_small_exp_xpu_complex32",
-        ":test_reference_numerics_normal_special_i1_xpu_float32",
-        "test_reference_numerics_normal_sigmoid_xpu_complex32",
-        "test_reference_numerics_small_sigmoid_xpu_complex32",
-    ),
+    "test_unary_ufuncs_xpu.py": None,
     # https://github.com/intel/torch-xpu-ops/issues/1171
     # AssertionError: 'Assertion maxind >= 0 && maxind < outputImageSize failed' not found in '\nAssertHandler::printMessage\n' : The expected error was not found
     "nn\test_pooling_xpu.py": (
@@ -36,4 +11,5 @@
         "test_MaxUnpool_index_errors_case7_xpu",
         "test_MaxUnpool_index_errors_case9_xpu",
     ),
+    "functorch/test_ops_functorch_xpu.py": None,
 }

test/xpu/skip_list_win_lnl.py

Lines changed: 2 additions & 25 deletions
@@ -1,30 +1,6 @@
 skip_dict = {
     # tensor(0.-0.j, device='xpu:0', dtype=torch.complex32) tensor(nan+nanj, device='xpu:0', dtype=torch.complex32) (1.5707964+0j)
-    "test_unary_ufuncs_xpu.pyy": (
-        "test_reference_numerics_small_acos_xpu_complex32",
-        "test_reference_numerics_small_asin_xpu_complex32",
-        "test_reference_numerics_small_asinh_xpu_complex32",
-        "test_reference_numerics_small_atan_xpu_complex32",
-        "test_reference_numerics_small_atanh_xpu_complex32",
-        # Need to check compiler std::sin() on inf+infj
-        "test_reference_numerics_extremal__refs_sin_xpu_complex128",
-        "test_reference_numerics_extremal__refs_sin_xpu_complex64",
-        "test_reference_numerics_extremal_nn_functional_tanhshrink_xpu_complex128",
-        "test_reference_numerics_extremal_nn_functional_tanhshrink_xpu_complex64",
-        "test_reference_numerics_extremal_sin_xpu_complex128",
-        "test_reference_numerics_extremal_sin_xpu_complex64",
-        "test_reference_numerics_extremal_sinh_xpu_complex128",
-        "test_reference_numerics_extremal_sinh_xpu_complex64",
-        "test_reference_numerics_large__refs_sin_xpu_complex32",
-        "test_reference_numerics_large_sin_xpu_complex32",
-        # Known issue of exp accuracy
-        # tensor(13437.7000-501.j, device='xpu:0', dtype=torch.complex128) tensor(inf+infj, device='xpu:0', dtype=torch.complex128) (-inf+infj)
-        "test_reference_numerics_large__refs_exp_xpu_complex128",
-        "test_reference_numerics_large_exp_xpu_complex128",
-        "test_reference_numerics_small_exp_xpu_complex32",
-        ":test_reference_numerics_normal_special_i1_xpu_float32",
-        "test_reference_numerics_normal_sigmoid_xpu_complex32",
-    ),
+    "test_unary_ufuncs_xpu.py": None,
    # https://github.com/intel/torch-xpu-ops/issues/1171
     # AssertionError: 'Assertion maxind >= 0 && maxind < outputImageSize failed' not found in '\nAssertHandler::printMessage\n' : The expected error was not found
     "nn\test_pooling_xpu.py": (
@@ -35,4 +11,5 @@
         "test_MaxUnpool_index_errors_case7_xpu",
         "test_MaxUnpool_index_errors_case9_xpu",
     ),
+    "functorch/test_ops_functorch_xpu.py": None,
 }
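
The Windows-specific lists (skip_list_win, skip_list_win_arc, skip_list_win_bmg, skip_list_win_lnl) follow the same shape as the common list, so a runner can layer one on top of the other. Below is a minimal sketch of one way such a merge could look; the dict contents are trimmed examples from this diff, and merge_skip_dicts is a hypothetical helper, not the project's actual mechanism.

def merge_skip_dicts(base: dict, overlay: dict) -> dict:
    """Union the skipped-case tuples per test file; None means "no skips"."""
    merged = dict(base)
    for test_file, cases in overlay.items():
        combined = tuple(merged.get(test_file) or ()) + tuple(cases or ())
        merged[test_file] = combined or None
    return merged

# Trimmed examples reflecting the state of the files after this commit.
common_skips = {
    "test_unary_ufuncs_xpu.py": None,
    "functorch/test_ops_functorch_xpu.py": None,
}
win_bmg_skips = {
    "test_unary_ufuncs_xpu.py": None,
    "nn\\test_pooling_xpu.py": (
        "test_MaxUnpool_index_errors_case7_xpu",
        "test_MaxUnpool_index_errors_case9_xpu",
    ),
    "functorch/test_ops_functorch_xpu.py": None,
}

effective_skips = merge_skip_dicts(common_skips, win_bmg_skips)
print(effective_skips)  # unary/functorch entries stay None; pooling cases are kept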
