|
126 | 126 | "# Train model\n",
|
127 | 127 | "import lightgbm as lgb\n",
|
128 | 128 | "model = lgb.LGBMRegressor(\n",
|
129 |
| - " n_estimators=2000, # If you want to use a larger model we've found 20_000 trees to be better\n", |
130 |
| - " learning_rate=0.01, # and a learning rate of 0.001\n", |
131 |
| - " max_depth=5, # and max_depth=6\n", |
132 |
| - " num_leaves=2**5-1, # and num_leaves of 2**6-1\n", |
| 129 | + " n_estimators=2000,\n", |
| 130 | + " learning_rate=0.01,\n", |
| 131 | + " max_depth=5,\n", |
| 132 | + " num_leaves=2**5-1,\n", |
133 | 133 | " colsample_bytree=0.1\n",
|
134 | 134 | ")\n",
|
| 135 | + "# We've found the following \"deep\" parameters perform much better, but they require much more CPU and RAM\n", |
| 136 | + "# model = lgb.LGBMRegressor(\n", |
| 137 | + "# n_estimators=30_000,\n", |
| 138 | + "# learning_rate=0.001,\n", |
| 139 | + "# max_depth=10,\n", |
| 140 | + "# num_leaves=2**10,\n", |
| 141 | + "# colsample_bytree=0.1,\n", |
| 142 | + "# min_data_in_leaf=10000,\n", |
| 143 | + "# )\n", |
135 | 144 | "model.fit(\n",
|
136 | 145 | " train[features],\n",
|
137 | 146 | " train[\"target\"]\n",
|
|
0 commit comments