Commit f9e2901
Merge branch 'master' into zwy2
2 parents e0f8acc + ec7164e
8 files changed, +18 -17 lines

python/jittor/__init__.py (+1 -1)

@@ -7,7 +7,7 @@
 # This file is subject to the terms and conditions defined in
 # file 'LICENSE.txt', which is part of this source code package.
 # ***************************************************************
-__version__ = '1.1.7.4'
+__version__ = '1.1.7.5'
 from . import lock
 with lock.lock_scope():
     from . import compiler

python/jittor/test/test_clone.py (+1 -1)

@@ -25,7 +25,7 @@ def test2(self):
         a = jt.array([1,2])
         print(a.detach())

-    @jt.flag_scope(eager_execution=1)
+    @jt.flag_scope(lazy_execution=0)
     def test3(self):
         a = jt.array([1,2])
         print(a.detach())

python/jittor/test/test_function.py (+2 -2)

@@ -284,10 +284,10 @@ def grad(self, grad0, _, grad1):
 class TestFunctionWithEagerExecution(TestFunction):
     @classmethod
     def setUpClass(self):
-        jt.flags.eager_execution = 1
+        jt.flags.lazy_execution = 0
     @classmethod
     def tearDownClass(self):
-        jt.flags.eager_execution = 0
+        jt.flags.lazy_execution = 1

 if __name__ == "__main__":
     unittest.main()
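
Note: the two test changes above track the flag rename made later in this diff (src/var_holder.cc): eager_execution (default off) becomes lazy_execution (default on), so the old "eager_execution = 1" corresponds to the new "lazy_execution = 0". A minimal sketch of the new spelling, using only calls that appear in this commit (the debug_run function is illustrative):

    import jittor as jt

    # Disable lazy execution globally to get eager execution, which
    # trades memory and speed for clearer errors and tracebacks.
    jt.flags.lazy_execution = 0
    jt.flags.lazy_execution = 1  # restore the default

    # Or scope the flag to a single function, as the tests do.
    @jt.flag_scope(lazy_execution=0)
    def debug_run():
        a = jt.array([1, 2])
        print(a.detach())

    debug_run()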

python/jittor/test/test_matmul.py (+1)

@@ -341,6 +341,7 @@ def check(a_shape, b_shape):
         check([8,1,3,4], [10,4,5])
         check([5,10,3,4], [5,10,4,5])

+    @unittest.skipIf(not jt.compiler.has_cuda, "No CUDA found")
     @jt.flag_scope(use_cuda=1)
     def test_matmul_example2_cuda(self):
         self.test_matmul_example2()
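
Note: the added decorator makes test_matmul_example2_cuda a skip on machines without CUDA instead of a failure. A minimal sketch of the same guard pattern (the test class and body are illustrative; the decorators are the ones used in this diff):

    import unittest
    import jittor as jt

    class TestCudaOnly(unittest.TestCase):
        @unittest.skipIf(not jt.compiler.has_cuda, "No CUDA found")
        @jt.flag_scope(use_cuda=1)
        def test_on_gpu(self):
            # Runs only when CUDA was detected; use_cuda=1 applies
            # for the duration of this test.
            c = jt.array([1, 2]) + jt.array([3, 4])
            assert (c.data == [4, 6]).all()

    if __name__ == "__main__":
        unittest.main()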

python/jittor/test/test_numpy_code_op.py (+9 -9)

@@ -58,9 +58,9 @@ def check():
             one=numpy.ones(a.shape)
             assert numpy.allclose(da.data,one*2.0)

-        jt.flags.use_cuda = 0
-        check()
-        jt.flags.use_cuda = 1
+        if jt.has_cuda:
+            with jt.flag_scope(use_cuda=1):
+                check()
         check()

     def test(self):
@@ -92,9 +92,9 @@ def check():
             one=numpy.ones(a.shape)
             assert numpy.allclose(da.data,one*2.0)

-        jt.flags.use_cuda = 0
-        check()
-        jt.flags.use_cuda = 1
+        if jt.has_cuda:
+            with jt.flag_scope(use_cuda=1):
+                check()
         check()

     def test_multi_input(self):
@@ -139,9 +139,9 @@ def check():
             assert numpy.allclose(dda.data,one)
             assert numpy.allclose(ddb.data,mone)

-        jt.flags.use_cuda = 0
-        check()
-        jt.flags.use_cuda = 1
+        if jt.has_cuda:
+            with jt.flag_scope(use_cuda=1):
+                check()
         check()

     @unittest.skipIf(True, "Memory leak testing is not in progress, Skip")
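
Note: the three hunks above replace unconditional toggling of jt.flags.use_cuda with a guarded flag_scope, so the CUDA pass only runs when CUDA is available and the flag is restored when the scope exits. A minimal sketch of the pattern (check here is a stand-in for the real test body):

    import jittor as jt

    def check():
        # Stand-in for the actual assertions in the test.
        a = jt.random((3, 3))
        assert a.data.shape == (3, 3)

    if jt.has_cuda:
        with jt.flag_scope(use_cuda=1):
            check()   # GPU pass, only when CUDA is available
    check()           # CPU pass always runs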

python/jittor/test/test_parallel_pass.py (+1 -1)

@@ -36,7 +36,7 @@ def check(self, use_int32):
         b = jt.random((n, n))
         a.data, b.data
         with jt.profile_scope(compile_options = {
-            "compile_shapes":1, "parallel":1
+            "compile_shapes":1, "parallel":2, "try_use_32bit_index":use_int32
         }, try_use_32bit_index = use_int32) as rep:
             c = a + b
             nc = c.data

src/utils/log.cc (+1 -1)

@@ -335,7 +335,7 @@ void test_log_time(std::ostream* out) {
         auto finish = std::chrono::high_resolution_clock::now();
         auto total_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(finish-start).count();
         LOGi << "total_ns" << total_ns << "each_ns" << total_ns/n;
-        CHECKop(total_ns/n,<=,6000);
+        CHECKop(total_ns/n,<=,6500);
     };
     std::list<std::thread> ts;
     for (int i=0; i<nthread; i++) ts.emplace_back(log_lot);

src/var_holder.cc (+2 -2)

@@ -16,14 +16,14 @@

 namespace jittor {

-DEFINE_FLAG(int, eager_execution, 0, "Use Eager execution rather than lazy execution, This flag makes error message and traceback infomation better. But this flag will raise memory consumption and lower the performance.");
+DEFINE_FLAG(int, lazy_execution, 1, "Default enabled, if disable, use immediately eager execution rather than lazy execution, This flag makes error message and traceback infomation better. But this flag will raise memory consumption and lower the performance.");

 list<VarHolder*> VarHolder::hold_vars;

 void add_hold_vars(VarHolder* self) {
     VarHolder::hold_vars.push_front(self);
     self->iter = VarHolder::hold_vars.begin();
-    if (!eager_execution) return;
+    if (lazy_execution) return;
     auto v = self->var;
     for (int i=0; i<5; i++) {
         auto op = v->input();
