We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 2a0a81a · commit 737a50e — Copy full SHA for 737a50e
src/llama-model.cpp
@@ -878,8 +878,8 @@ void llama_model::load_hparams(llama_model_loader & ml) {
878
}
879
880
hparams.f_attention_scale = type == LLM_TYPE_27B
881
- ? 1.0f / std::sqrtf(float(hparams.n_embd / hparams.n_head(0)))
882
- : 1.0f / std::sqrtf(float(hparams.n_embd_head_k));
+ ? 1.0f / std::sqrt(float(hparams.n_embd / hparams.n_head(0)))
+ : 1.0f / std::sqrt(float(hparams.n_embd_head_k));
883
} break;
884
case LLM_ARCH_STARCODER2:
885
{
0 commit comments