Skip to content
Open
Changes from 1 commit
Commits
Show all changes
47 commits
Select commit Hold shift + click to select a range
9226e9d
add support for adapting pytorch's c++ codes.
Exusial Apr 28, 2022
282fc15
update ctorch.
Exusial Apr 28, 2022
3770a5d
Merge branch 'master' into ctorch
Exusial Apr 28, 2022
5e4f24e
fix multiple definitions.
Exusial Apr 30, 2022
ad4ee50
fix linspace zero division.
Exusial Sep 12, 2022
20da64e
Add interpolate area support.
Exusial Sep 15, 2022
2b394c5
Merge branch 'master' into ctorch
Exusial Sep 15, 2022
1531e1f
Merge branch 'debug' into ctorch
Exusial Sep 15, 2022
72b73fa
update ctorch with ArrayRef.
Exusial Sep 18, 2022
cd68088
add documentation and tests.
Exusial Sep 18, 2022
d7ac1eb
pass nvdiffrec renderutils.
Exusial Sep 20, 2022
46737e0
Merge branch 'master' into debug
Exusial Sep 20, 2022
941941c
fix batched matmul.
Exusial Sep 20, 2022
917e956
Merge branch 'debug' into ctorch
Exusial Sep 20, 2022
a8f07be
add broadcast support for matmul and function backward.
Exusial Sep 21, 2022
6ed2602
update ldflags.
Exusial Sep 21, 2022
d8801ae
add atan2.
Exusial Sep 22, 2022
4dff6e3
fix cuda arch.
Exusial Sep 22, 2022
5e78237
fix arrayref.
Exusial Sep 23, 2022
34e7dd7
delete log.
Exusial Sep 29, 2022
0c632eb
add int64 support.
Exusial Sep 30, 2022
409945e
fix int64 sum.
Exusial Oct 5, 2022
385c917
fix None.
Exusial Oct 14, 2022
f8ad90b
Merge branch 'master' into ctorch
Exusial Oct 21, 2022
f59119f
fix cuda.
Exusial Oct 21, 2022
8d9e725
fix cuda.
Exusial Oct 21, 2022
b29fb87
fix cuda.
Exusial Oct 21, 2022
34215c5
update.
Exusial Oct 21, 2022
8a3b50a
update dtype.
Exusial Oct 21, 2022
62431c9
update dtype.
Exusial Oct 22, 2022
60a551d
update header.
Exusial Oct 22, 2022
61a501c
fix type.
Exusial Oct 26, 2022
653147c
add ~.
Exusial Oct 27, 2022
cb483cf
fix release bug.
Exusial Oct 28, 2022
9a99f6c
fix memory.
Exusial Oct 31, 2022
30be2dc
add ninja build support.
Exusial Nov 3, 2022
46f4ac3
fix memory leak.
Exusial Nov 15, 2022
40db8bd
merge.
Exusial Nov 26, 2022
cd0121f
Merge branch 'master' into ctorch
Feb 6, 2023
fe522ad
update compatibility.
Feb 7, 2023
a474e4a
update ctorch stream process.
Feb 23, 2023
f8cb69a
update compiler.
Exusial Feb 23, 2023
3144e1e
update for optix.
Feb 27, 2023
7f30726
set up.
Exusial Feb 27, 2023
672c056
Merge branch 'ctorch' of github.com:Exusial/jittor into ctorch
Exusial Feb 27, 2023
2853904
add ctorch verbose mode.
Exusial Feb 28, 2023
e760279
fix load pytorch.
Exusial Mar 1, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
update ctorch.
Exusial committed Apr 28, 2022
commit 282fc15c51824fb2ba2ca0c66354d864b8cddffc
15 changes: 15 additions & 0 deletions python/jittor/compiler.py
Original file line number Diff line number Diff line change
@@ -1368,3 +1368,18 @@ def func(x):
flags.has_pybt = has_pybt

core.set_lock_path(lock.lock_path)

def compile_torch_extensions(extension_name, sources, use_cuda=0):
    """Compile PyTorch-style C++/CUDA extension sources against jittor's
    ctorch compatibility headers, producing an importable Python module.

    Args:
        extension_name: name of the output module; also exported as the
            ``TORCH_EXTENSION_NAME`` macro expected by the torch C++ API.
        sources: a list of source file paths, or a single string of
            space-separated paths.
        use_cuda: when truthy, compile with ``nvcc`` instead of the host
            C++ compiler (required when the sources contain CUDA code).

    Returns:
        The exit status of the compiler invocation (0 on success).
    """
    # CUDA sources must go through nvcc; plain C++ uses the host compiler.
    compiler = nvcc_path if use_cuda else cc_path
    jittor_src_path = os.path.join(jittor_path, "src")
    assert isinstance(sources, (str, list)), "must input lists or concated string of source files"
    if not isinstance(sources, str):
        sources = " ".join(sources)
    # -I paths expose jittor's headers plus the ctorch torch-compatibility
    # shims; the pybind11/python3-config subshells supply the Python ABI
    # include flags and the platform extension suffix.
    compile_command = f"{compiler} {sources} -I{jittor_src_path} -I{jittor_src_path}/ctorch -DTORCH_EXTENSION_NAME={extension_name} -O3 -shared -std=c++14 --forward-unknown-to-host-compiler --expt-relaxed-constexpr -fPIC -DHAS_CUDA -I{jittor_path}/extern/cuda/inc/ --allow-unsupported-compiler $(python3 -m pybind11 --includes) -o {extension_name}$(python3-config --extension-suffix)"
    status = os.system(compile_command)
    if status != 0:
        # BUGFIX: the original used `LOGir << ...` — that is the C++ logging
        # macro, which is not defined in this Python module and would raise
        # NameError right when the user needs the diagnostic. Use the
        # Python-side LOG object (used throughout compiler.py) instead.
        LOG.e("Compile failed. If you are compiling CUDA ops, please set use_cuda to 1 in the parameters.")
    return status
Empty file.
Empty file.
15 changes: 15 additions & 0 deletions python/jittor/src/ctorch/c10/util/Half.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
#pragma once
#include "var.h"
#include "var_holder.h"
#include "executor.h"
#include "ops/getitem_op.h"
#include "ops/op_register.h"
#include "pyjt/py_converter.h"
#include "misc/cuda_flags.h"
#include <cuda_runtime.h>
#include "helper_cuda.h"


namespace c10 {
// Compatibility shim: torch code refers to c10::Half, which jittor does not
// provide yet, so it is aliased to a wider type for now.
using Half = jittor::float32; // to do: change this to float16.
}
Original file line number Diff line number Diff line change
@@ -215,15 +215,13 @@ namespace jittor {
}

// Report whether this tensor is on the GPU, mirroring torch's Tensor::is_cuda.
// NOTE(review): this returns `use_cuda` — presumably jittor's global device
// flag (the removed log printed a single fixed address) rather than a
// per-tensor property; confirm against the enclosing class.
bool is_cuda() {
    // BUGFIX: dropped leftover debug output (`LOGir << use_cuda << &use_cuda;`)
    // that logged on every call.
    return use_cuda;
}

// Build an Option carrying this tensor's dtype.
// Precondition: jtptr must be non-null (dtype() relies on it).
Option options() {
    Option opt(dtype());
    return opt;
}

// VarPtr jtptr;
// NOTE(review): raw pointer to the wrapped jittor VarHolder; ownership and
// lifetime are not visible from this hunk — confirm who frees it (the
// commented-out VarPtr alternative suggests ownership was deliberate).
VarHolder* jtptr;
int64 ndim;               // presumably the tensor's rank — TODO confirm
at::MemoryFormat format;  // torch-style memory-format tag for this tensor