Commit 2a8f608

add deep lgbm params to tutorial notebook comments
1 parent 0fe670a commit 2a8f608

File tree

4 files changed: +1082 -1211 lines changed

example_model.ipynb

Lines changed: 13 additions & 4 deletions
@@ -126,12 +126,21 @@
     "# Train model\n",
     "import lightgbm as lgb\n",
     "model = lgb.LGBMRegressor(\n",
-    "    n_estimators=2000, # If you want to use a larger model we've found 20_000 trees to be better\n",
-    "    learning_rate=0.01, # and a learning rate of 0.001\n",
-    "    max_depth=5, # and max_depth=6\n",
-    "    num_leaves=2**5-1, # and num_leaves of 2**6-1\n",
+    "    n_estimators=2000,\n",
+    "    learning_rate=0.01,\n",
+    "    max_depth=5,\n",
+    "    num_leaves=2**5-1,\n",
     "    colsample_bytree=0.1\n",
     ")\n",
+    "# We've found the following \"deep\" parameters perform much better, but they require much more CPU and RAM\n",
+    "# model = lgb.LGBMRegressor(\n",
+    "#     n_estimators=30_000,\n",
+    "#     learning_rate=0.001,\n",
+    "#     max_depth=10,\n",
+    "#     num_leaves=2**10,\n",
+    "#     colsample_bytree=0.1,\n",
+    "#     min_data_in_leaf=10000,\n",
+    "# )\n",
     "model.fit(\n",
     "    train[features],\n",
     "    train[\"target\"]\n",

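For reference, the commented-out "deep" configuration corresponds to roughly the following standalone snippet. This is a minimal sketch: the random train DataFrame, the features list, and the "target" column below are hypothetical stand-ins for the notebook's actual training data, and the deep block is reproduced from the comment added in this commit.

import lightgbm as lgb
import numpy as np
import pandas as pd

# Hypothetical stand-in data; the real notebook builds `train` and `features` elsewhere.
rng = np.random.default_rng(0)
features = [f"feature_{i}" for i in range(50)]
train = pd.DataFrame(rng.normal(size=(5_000, 50)), columns=features)
train["target"] = rng.normal(size=5_000)

# Default parameters from the rendered cell: cheap enough to train on a laptop.
model = lgb.LGBMRegressor(
    n_estimators=2000,
    learning_rate=0.01,
    max_depth=5,
    num_leaves=2**5 - 1,
    colsample_bytree=0.1,
)

# "Deep" parameters from the new comment block: much heavier on CPU and RAM.
# model = lgb.LGBMRegressor(
#     n_estimators=30_000,
#     learning_rate=0.001,
#     max_depth=10,
#     num_leaves=2**10,
#     colsample_bytree=0.1,
#     min_data_in_leaf=10_000,
# )

model.fit(train[features], train["target"])

Roughly, num_leaves=2**10 lets each tree use all 1024 leaves a depth-10 tree can hold, while min_data_in_leaf=10000 keeps any single leaf from fitting a tiny slice of the data; that combination only pays off with a large training set and, as the new comment warns, much more CPU time and RAM.
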
feature_neutralization.ipynb

Lines changed: 149 additions & 205 deletions
Large diffs are not rendered by default.
