We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent ccd64fd · commit 17bda71 (Copy full SHA for 17bda71)
python/monarch/_src/job/slurm.py
@@ -137,7 +137,7 @@ def _submit_slurm_job(self, num_nodes: int) -> str:
137
sbatch_directives.append(f"#SBATCH --partition={self._partition}")
138
139
# add proportional cpu and memory args when not taking the full node
140
- if not self._exclusive and self._partition:
+ if not self._exclusive and self._partition and self._gpus_per_node:
141
gpus_per_task = self._gpus_per_node // self._ntasks_per_node
142
slurm_args = clusterscope.job_gen_task_slurm(
143
partition=self._partition,
0 commit comments