Skip to content

Commit 8aa6eb1

Browse files
Authored commit: change FasterTransformer to TurboMind (#37)
1 parent 70e6ab2 commit 8aa6eb1

File tree

3 files changed

+16
-16
lines changed

3 files changed

+16
-16
lines changed

CMakeLists.txt

+8-8
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
# limitations under the License.
1414

1515
cmake_minimum_required(VERSION 3.11 FATAL_ERROR) # for PyTorch extensions, version should be greater than 3.13
16-
project(FasterTransformer LANGUAGES CXX CUDA)
16+
project(TurboMind LANGUAGES CXX CUDA)
1717

1818
find_package(CUDA 10.2 REQUIRED)
1919

@@ -354,18 +354,18 @@ set_target_properties(transformer-shared PROPERTIES LINKER_LANGUAGE CXX)
354354
target_link_libraries(transformer-shared PUBLIC -lcudart -lcublas -lcublasLt -lcurand)
355355

356356
include(GNUInstallDirs)
357-
set(INSTALL_CONFIGDIR ${CMAKE_INSTALL_LIBDIR}/cmake/FasterTransformer)
357+
set(INSTALL_CONFIGDIR ${CMAKE_INSTALL_LIBDIR}/cmake/TurboMind)
358358

359359
include(CMakePackageConfigHelpers)
360360
configure_package_config_file(
361-
${CMAKE_CURRENT_LIST_DIR}/cmake/FasterTransformerConfig.cmake.in
362-
${CMAKE_CURRENT_BINARY_DIR}/FasterTransformerConfig.cmake
361+
${CMAKE_CURRENT_LIST_DIR}/cmake/TurboMindConfig.cmake.in
362+
${CMAKE_CURRENT_BINARY_DIR}/TurboMindConfig.cmake
363363
INSTALL_DESTINATION ${INSTALL_CONFIGDIR}
364364
)
365365

366366
install(
367367
FILES
368-
${CMAKE_CURRENT_BINARY_DIR}/FasterTransformerConfig.cmake
368+
${CMAKE_CURRENT_BINARY_DIR}/TurboMindConfig.cmake
369369
DESTINATION ${INSTALL_CONFIGDIR}
370370
)
371371

@@ -382,7 +382,7 @@ install(
382382
EXPORT
383383
transformer-shared-targets
384384
FILE
385-
FasterTransformerTargets.cmake
385+
TurboMindTargets.cmake
386386
DESTINATION
387387
${INSTALL_CONFIGDIR}
388388
)
@@ -391,9 +391,9 @@ export(
391391
EXPORT
392392
transformer-shared-targets
393393
FILE
394-
${CMAKE_CURRENT_BINARY_DIR}/FasterTransformerTargets.cmake
394+
${CMAKE_CURRENT_BINARY_DIR}/TurboMindTargets.cmake
395395
NAMESPACE
396396
TritonCore::
397397
)
398398

399-
export(PACKAGE FasterTransformer)
399+
export(PACKAGE TurboMind)

cmake/FasterTransformerConfig.cmake.in renamed to cmake/TurboMindConfig.cmake.in

+4-4
Original file line numberDiff line numberDiff line change
@@ -27,13 +27,13 @@
2727
include(CMakeFindDependencyMacro)
2828

2929
get_filename_component(
30-
FASTERTRANSFORMER_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH
30+
TURBOMIND_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH
3131
)
3232

33-
list(APPEND CMAKE_MODULE_PATH ${FASTERTRANSFORMER_CMAKE_DIR})
33+
list(APPEND CMAKE_MODULE_PATH ${TURBOMIND_CMAKE_DIR})
3434

3535
if(NOT TARGET transformer-shared)
36-
include("${FASTERTRANSFORMER_CMAKE_DIR}/FasterTransformerTargets.cmake")
36+
include("${TURBOMIND_CMAKE_DIR}/TurboMindTargets.cmake")
3737
endif()
3838

39-
set(FASTERTRANSFORMER_LIBRARIES transformer-shared)
39+
set(TURBOMIND_LIBRARIES transformer-shared)

tests/gemm_dequantize/th_gemm_dequantize.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -13,14 +13,14 @@ class TestGemmDequantize(unittest.TestCase):
1313
def setUp(self) -> None:
1414
torch.classes.load_library('lib/libth_transformer.so')
1515
torch.classes.load_library('lib/libgemm_dq_unit_ops.so')
16-
self.unpack_packed_int4s = torch.ops.fastertransformer.unpack_int4_packed_tensor_to_int8
17-
self.pack_int4s = torch.ops.fastertransformer.pack_int8_tensor_to_packed_int4
16+
self.unpack_packed_int4s = torch.ops.turbomind.unpack_int4_packed_tensor_to_int8
17+
self.pack_int4s = torch.ops.turbomind.pack_int8_tensor_to_packed_int4
1818
self.fused_gemm_dq = torch.ops.gemm_dq_unit_ops.fused_gemm_dq
1919
self.fused_gemm_dq_bias_act = torch.ops.gemm_dq_unit_ops.fused_gemm_dq_bias_act
2020
self.bench = torch.ops.gemm_dq_unit_ops.benchmark_against_cublas_fp
21-
self.preprocess_weights_for_mixed_gemm = torch.ops.fastertransformer.preprocess_weights_for_mixed_gemm
21+
self.preprocess_weights_for_mixed_gemm = torch.ops.turbomind.preprocess_weights_for_mixed_gemm
2222

23-
self.symmetric_quantizer = torch.ops.fastertransformer._symmetric_quantize_last_axis_of_batched_matrix
23+
self.symmetric_quantizer = torch.ops.turbomind._symmetric_quantize_last_axis_of_batched_matrix
2424

2525
torch.manual_seed(734876213)
2626

0 commit comments

Comments (0)