Ensure new level_enum is used everywhere it should be to specify log levels
vyasr committed Dec 12, 2024
1 parent c898c29 commit 4a3a18c
Showing 18 changed files with 70 additions and 80 deletions.
14 changes: 7 additions & 7 deletions docs/source/api.rst
@@ -91,25 +91,25 @@ they are:
  - cuml.common.logger value
  - Verbosity level
  * - 0
- - cuml.common.logger.level_off
+ - cuml.common.logger.level_enum.off
  - Disables all log messages
  * - 1
- - cuml.common.logger.level_critical
+ - cuml.common.logger.level_enum.critical
  - Enables only critical messages
  * - 2
- - cuml.common.logger.level_error
+ - cuml.common.logger.level_enum.error
  - Enables all messages up to and including errors.
  * - 3
- - cuml.common.logger.level_warn
+ - cuml.common.logger.level_enum.warn
  - Enables all messages up to and including warnings.
  * - 4 or False
- - cuml.common.logger.level_info
+ - cuml.common.logger.level_enum.info
  - Enables all messages up to and including information messages.
  * - 5 or True
- - cuml.common.logger.level_debug
+ - cuml.common.logger.level_enum.debug
  - Enables all messages up to and including debug messages.
  * - 6
- - cuml.common.logger.level_trace
+ - cuml.common.logger.level_enum.trace
  - Enables all messages up to and including trace messages.
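Taken together with the table above, the practical change for callers is that the old `level_*` constants become members of `level_enum`. A minimal usage sketch, assuming `cuml.common.logger` exposes `level_enum`, `set_level`, `should_log_for`, and `debug` as they are referenced throughout this diff:

```python
from cuml.common import logger

# Enable all messages up to and including info-level output.
logger.set_level(logger.level_enum.info)

# Guard expensive message construction behind an explicit level check,
# mirroring the pattern used repeatedly in this commit.
if logger.should_log_for(logger.level_enum.debug):
    logger.debug("expensive diagnostic message")
```
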
2 changes: 1 addition & 1 deletion python/cuml/cuml/common/kernel_utils.py
@@ -111,7 +111,7 @@ def cuda_kernel_factory(nvrtc_kernel_str, dtypes, kernel_name=None):
  nvrtc_kernel_str,
  )

- if logger.should_log_for(logger.level_debug):
+ if logger.should_log_for(logger.level_enum.debug):
  logger.debug(str(nvrtc_kernel_str))

  return cp.RawKernel(nvrtc_kernel_str, kernel_name)
4 changes: 2 additions & 2 deletions python/cuml/cuml/dask/common/dask_df_utils.py
@@ -1,4 +1,4 @@
- # Copyright (c) 2019-2023, NVIDIA CORPORATION.
+ # Copyright (c) 2019-2024, NVIDIA CORPORATION.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -39,7 +39,7 @@ def to_dask_cudf(futures, client=None):
  c = default_client() if client is None else client
  # Convert a list of futures containing dfs back into a dask_cudf
  dfs = [d for d in futures if d.type != type(None)] # NOQA
- if logger.should_log_for(logger.level_debug):
+ if logger.should_log_for(logger.level_enum.debug):
  logger.debug("to_dask_cudf dfs=%s" % str(dfs))
  meta = c.submit(get_meta, dfs[0])
  meta_local = meta.result()
2 changes: 1 addition & 1 deletion python/cuml/cuml/dask/common/input_utils.py
@@ -239,7 +239,7 @@ def _to_dask_cudf(futures, client=None):
  c = default_client() if client is None else client
  # Convert a list of futures containing dfs back into a dask_cudf
  dfs = [d for d in futures if d.type != type(None)] # NOQA
- if logger.should_log_for(logger.level_debug):
+ if logger.should_log_for(logger.level_enum.debug):
  logger.debug("to_dask_cudf dfs=%s" % str(dfs))
  meta_future = c.submit(_get_meta, dfs[0], pure=False)
  meta = meta_future.result()
2 changes: 1 addition & 1 deletion python/cuml/cuml/experimental/accel/__init__.py
@@ -33,7 +33,7 @@ def _install_for_library(library_name):

  def install():
  """Enable cuML Accelerator Mode."""
- logger.set_level(logger.level_info)
+ logger.set_level(logger.level_enum.info)
  logger.set_pattern("%v")

  logger.info("cuML: Installing experimental accelerator...")
8 changes: 4 additions & 4 deletions python/cuml/cuml/explainer/base.pyx
@@ -1,5 +1,5 @@
  #
- # Copyright (c) 2020-2023, NVIDIA CORPORATION.
+ # Copyright (c) 2020-2024, NVIDIA CORPORATION.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -125,13 +125,13 @@ class SHAPBase():
  output_type=None):

  if verbose is True:
- self.verbose = logger.level_debug
+ self.verbose = logger.level_enum.debug
  elif verbose is False:
- self.verbose = logger.level_error
+ self.verbose = logger.level_enum.error
  else:
  self.verbose = verbose

- if self.verbose >= logger.level_debug:
+ if self.verbose >= logger.level_enum.debug:
  self.time_performance = True
  else:
  self.time_performance = False
17 changes: 4 additions & 13 deletions python/cuml/cuml/internals/base.pyx
@@ -146,9 +146,8 @@ class Base(TagsMixin,
  run different models concurrently in different streams by creating
  handles in several streams.
  If it is None, a new one is created.
- verbose : int or boolean, default=False
- Sets logging level. It must be one of `cuml.common.logger.level_*`.
- See :ref:`verbosity-levels` for more info.
+ verbose : level_enum
+ Sets logging level. See :ref:`verbosity-levels` for more info.
  output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
  'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
  Return results and set estimator attributes to the indicated output
@@ -208,7 +207,7 @@

  def __init__(self, *,
  handle=None,
- verbose=False,
+ verbose=logger.level_enum.info,
  output_type=None,
  output_mem_type=None):
  """
@@ -222,15 +221,7 @@
  self.handle = None

  IF GPUBUILD == 1:
- # Internally, self.verbose follows the spdlog/c++ standard of
- # 0 is most logging, and logging decreases from there.
- # So if the user passes an int value for logging, we convert it.
- if verbose is True:
- self.verbose = logger.level_enum.debug
- elif verbose is False:
- self.verbose = logger.level_enum.info
- else:
- self.verbose = logger.level_enum(verbose)
+ self.verbose = logger.level_enum(verbose)
  ELSE:
  self.verbose = verbose

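The simplified constructor relies on `logger.level_enum(verbose)` accepting both an existing member and a plain integer, so `Base.__init__` no longer special-cases booleans. A self-contained illustration using a hypothetical stand-in enum (the real `level_enum` is defined by the logger; the numeric values below follow the verbosity table in docs/source/api.rst and are illustrative only):

```python
from enum import IntEnum


# Hypothetical stand-in for cuml's level_enum; member values are assumptions.
class level_enum(IntEnum):
    off = 0
    critical = 1
    error = 2
    warn = 3
    info = 4
    debug = 5
    trace = 6


# Passing a member through the constructor returns the same member ...
assert level_enum(level_enum.debug) is level_enum.debug
# ... and a plain integer resolves to the member with that value, which is
# what the single `self.verbose = logger.level_enum(verbose)` line relies on.
assert level_enum(4) is level_enum.info
```
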
16 changes: 8 additions & 8 deletions python/cuml/cuml/internals/logger.pyx
@@ -70,10 +70,10 @@ def set_level(level):
  # regular usage of setting a logging level for all subsequent logs
  # in this case, it will enable all logs upto and including `info()`
- logger.set_level(logger.level_info)
+ logger.set_level(logger.level_enum.info)
  # in case one wants to temporarily set the log level for a code block
- with logger.set_level(logger.level_debug) as _:
+ with logger.set_level(logger.level_enum.debug) as _:
  logger.debug("Hello world!")
  Parameters
@@ -161,16 +161,15 @@ def should_log_for(level):
  .. code-block:: python
- if logger.should_log_for(level_info):
+ if logger.should_log_for(level_enum.info):
  # which could waste precious CPU cycles
  my_message = construct_message()
  logger.info(my_message)
  Parameters
  ----------
  level : level_enum
- Logging level to be set. \
- It must be one of cuml.common.logger.level_*
+ Logging level to be set.
  """
  IF GPUBUILD == 1:
  return default_logger().should_log(level)
@@ -182,11 +181,12 @@ def _log(level_enum lvl, msg, default_func):
  Parameters
  ----------
- lvl : int
- Logging level to be set. \
- It must be one of cuml.common.logger.level_*
+ lvl : level_enum
+ Logging level to be set.
  msg : str
  Message to be logged.
  default_func : function
  Default logging function to be used if GPU build is disabled.
  """
  IF GPUBUILD == 1:
  cdef string s = msg.encode("UTF-8")
15 changes: 7 additions & 8 deletions python/cuml/cuml/linear_model/logistic_regression.pyx
@@ -141,9 +141,8 @@ class LogisticRegression(UniversalBase,
  linesearch_max_iter : int (default = 50)
  Max number of linesearch iterations per outer iteration used in the
  lbfgs and owl QN solvers.
- verbose : int or boolean, default=False
- Sets logging level. It must be one of `cuml.common.logger.level_*`.
- See :ref:`verbosity-levels` for more info.
+ verbose : level_enum
+ Sets logging level. See :ref:`verbosity-levels` for more info.
  l1_ratio : float or None, optional (default=None)
  The Elastic-Net mixing parameter, with `0 <= l1_ratio <= 1`
  solver : 'qn' (default='qn')
@@ -277,7 +276,7 @@ class LogisticRegression(UniversalBase,
  handle=self.handle,
  )

- if logger.should_log_for(logger.level_debug):
+ if logger.should_log_for(logger.level_enum.debug):
  self.verb_prefix = "CY::"
  logger.debug(self.verb_prefix + "Estimator parameters:")
  logger.debug(pprint.pformat(self.__dict__))
@@ -353,24 +352,24 @@ class LogisticRegression(UniversalBase,
  else:
  loss = "sigmoid"

- if logger.should_log_for(logger.level_debug):
+ if logger.should_log_for(logger.level_enum.debug):
  logger.debug(self.verb_prefix + "Setting loss to " + str(loss))

  self.solver_model.loss = loss

- if logger.should_log_for(logger.level_debug):
+ if logger.should_log_for(logger.level_enum.debug):
  logger.debug(self.verb_prefix + "Calling QN fit " + str(loss))

  self.solver_model.fit(X, y_m, sample_weight=sample_weight,
  convert_dtype=convert_dtype)

  # coefficients and intercept are contained in the same array
- if logger.should_log_for(logger.level_debug):
+ if logger.should_log_for(logger.level_enum.debug):
  logger.debug(
  self.verb_prefix + "Setting coefficients " + str(loss)
  )

- if logger.should_log_for(logger.level_trace):
+ if logger.should_log_for(logger.level_enum.trace):
  with using_output_type("cupy"):
  logger.trace(self.verb_prefix + "Coefficients: " +
  str(self.solver_model.coef_))
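As a usage sketch of the updated `verbose` parameter documented above (assuming the usual `cuml.linear_model` import path and that `LogisticRegression` forwards `verbose` to `Base` unchanged), an estimator is now configured with an enum member rather than a bare int or bool:

```python
from cuml.common import logger
from cuml.linear_model import LogisticRegression

# Debug-level verbosity enables the "CY::"-prefixed diagnostics that fit()
# guards with should_log_for(logger.level_enum.debug) in the diff above.
model = LogisticRegression(verbose=logger.level_enum.debug)
```
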
6 changes: 3 additions & 3 deletions python/cuml/cuml/neighbors/kneighbors_classifier_mg.pyx
@@ -1,5 +1,5 @@
  #
- # Copyright (c) 2020-2023, NVIDIA CORPORATION.
+ # Copyright (c) 2020-2024, NVIDIA CORPORATION.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -153,7 +153,7 @@ class KNeighborsClassifierMG(NearestNeighborsMG):

  cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()

- is_verbose = logger.should_log_for(logger.level_debug)
+ is_verbose = logger.should_log_for(logger.level_enum.debug)
  knn_classify(
  handle_[0],
  out_result_local_parts,
@@ -265,7 +265,7 @@ class KNeighborsClassifierMG(NearestNeighborsMG):
  p_cai.ptr)

  cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
- is_verbose = logger.should_log_for(logger.level_debug)
+ is_verbose = logger.should_log_for(logger.level_enum.debug)

  # Launch distributed operations
  knn_classify(
4 changes: 2 additions & 2 deletions python/cuml/cuml/neighbors/kneighbors_regressor_mg.pyx
@@ -1,5 +1,5 @@
  #
- # Copyright (c) 2020-2023, NVIDIA CORPORATION.
+ # Copyright (c) 2020-2024, NVIDIA CORPORATION.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -127,7 +127,7 @@ class KNeighborsRegressorMG(NearestNeighborsMG):
  <float*><uintptr_t>o_cai.ptr, n_rows * n_outputs))

  cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
- is_verbose = logger.should_log_for(logger.level_debug)
+ is_verbose = logger.should_log_for(logger.level_enum.debug)

  # Launch distributed operations
  knn_regress(
4 changes: 2 additions & 2 deletions python/cuml/cuml/neighbors/nearest_neighbors_mg.pyx
@@ -1,5 +1,5 @@
  #
- # Copyright (c) 2020-2023, NVIDIA CORPORATION.
+ # Copyright (c) 2020-2024, NVIDIA CORPORATION.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -125,7 +125,7 @@ class NearestNeighborsMG(NearestNeighbors):
  result = type(self).alloc_local_output(local_query_rows, self.n_neighbors)

  cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
- is_verbose = logger.should_log_for(logger.level_debug)
+ is_verbose = logger.should_log_for(logger.level_enum.debug)

  # Launch distributed operations
  knn(