Skip to content

Commit

Permalink
Fixed a bug in the scripts
Browse files Browse the repository at this point in the history
  • Loading branch information
ParsaHejabi committed May 25, 2024
1 parent 1587c00 commit b41bd0e
Show file tree
Hide file tree
Showing 2 changed files with 13 additions and 18 deletions.
16 changes: 8 additions & 8 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ huggingface-hub==0.23.1
idna==3.7
iniconfig==2.0.0
ipykernel==6.29.4
ipython==8.23.0
ipython==8.24.0
ipywidgets==8.1.2
isoduration==20.11.0
jedi==0.19.1
Expand All @@ -75,9 +75,9 @@ jupyter_client==8.6.1
jupyter_core==5.7.2
jupyter_server==2.13.0
jupyter_server_terminals==0.5.3
jupyterlab==4.1.5
jupyterlab==4.2.1
jupyterlab_pygments==0.3.0
jupyterlab_server==2.25.4
jupyterlab_server==2.27.2
jupyterlab_widgets==3.0.10
kiwisolver==1.4.5
MarkupSafe==2.1.5
Expand Down Expand Up @@ -106,7 +106,7 @@ nvidia-cufft-cu12==11.0.2.54
nvidia-curand-cu12==10.3.2.106
nvidia-cusolver-cu12==11.4.5.107
nvidia-cusparse-cu12==12.1.0.106
nvidia-nccl-cu12==2.19.3
nvidia-nccl-cu12==2.20.5
nvidia-nvjitlink-cu12==12.4.127
nvidia-nvtx-cu12==12.1.105
openai==1.30.1
Expand Down Expand Up @@ -168,14 +168,14 @@ tiktoken==0.7.0
tinycss2==1.2.1
tokenizers==0.19.1
tomli==2.0.1
torch==2.2.2
torchaudio==2.2.2
torchvision==0.17.2
torch==2.3.0
torchaudio==2.3.0
torchvision==0.18.0
tornado==6.4
tqdm==4.66.3
traitlets==5.14.2
transformers==4.41.1
triton==2.2.0
triton==2.3.0
types-python-dateutil==2.9.0.20240316
typing_extensions==4.10.0
tzdata==2024.1
Expand Down
15 changes: 5 additions & 10 deletions run_second_experiment.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,11 @@

source venv/bin/activate

dry_run=true
dry_run=false

# player_llm_models can be a string with multiple models separated by whitespace
# currently supported models are: "meta-llama/Meta-Llama-3-8B-Instruct", "microsoft/Phi-3-small-8k-instruct", "google/gemma-1.1-7b-it", "mistralai/Mistral-7B-Instruct-v0.3", "gpt-3.5-turbo-0125"
player_llm_models_list="meta-llama/Meta-Llama-3-8B-Instruct microsoft/Phi-3-small-8k-instruct google/gemma-1.1-7b-it mistralai/Mistral-7B-Instruct-v0.3"
player_llm_models_list="meta-llama/Meta-Llama-3-8B-Instruct microsoft/Phi-3-small-8k-instruct google/gemma-1.1-7b-it mistralai/Mistral-7B-Instruct-v0.3 gpt-3.5-turbo-0125"
history_types_list="full mini"
# for each player_llm_models in player_llm_models_list
for player_llm_models in $player_llm_models_list; do
Expand All @@ -15,14 +15,9 @@ for player_llm_models in $player_llm_models_list; do
num_players=3
judge_llm_model="meta-llama/Meta-Llama-3-8B-Instruct"
# the first gpu is for the judge, and the rest are for the players
llm_gpu_mapping=(0 0 0 0)
# first make a unique set of indexes from llm_gpu_mapping array
llm_gpu_mapping_set=($(echo "${llm_gpu_mapping[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' '))
# join the elements of llm_gpu_mapping_set array with comma and store it in gpu_indexes
gpu_indexes=$(
IFS=,
echo "${llm_gpu_mapping_set[*]}"
)
llm_gpu_mapping=(7 7 7 7)
# GPU indexes on this device range from 0 to 7
gpu_indexes="0,1,2,3,4,5,6,7"
# join the elements of llm_gpu_mapping array with whitespace and store it in llm_gpu_mapping
llm_gpu_mapping=$(
IFS=' '
Expand Down

0 comments on commit b41bd0e

Please sign in to comment.