Skip to content

Commit 4708012

Browse files
committed
mpt : remove tabs, trailing whitespace
1 parent 90e7d6d commit 4708012

File tree

2 files changed

+5
-5
lines changed

2 files changed

+5
-5
lines changed

convert-mpt-hf-to-gguf.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -121,7 +121,7 @@ def parse_args() -> argparse.Namespace:
 121 121      gguf_writer.add_block_count(block_count)
 122 122      gguf_writer.add_feed_forward_length(4 * hparams["d_model"])
 123 123      gguf_writer.add_head_count(hparams["n_heads"])
 124     -    gguf_writer.add_layer_norm_eps(1e-05)   (tab-indented)
     124 +    gguf_writer.add_layer_norm_eps(1e-05)   (space-indented)
 125 125      if hparams["attn_config"]["clip_qkv"] is not None:
 126 126          gguf_writer.add_clamp_kqv(hparams["attn_config"]["clip_qkv"])
 127 127          gguf_writer.add_max_alibi_bias(hparams["attn_config"]["alibi_bias_max"])

llama.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4304,11 +4304,11 @@ static struct ggml_cgraph * llm_build_mpt(
 4304 4304        offload_func_kq(KQ_scaled);
 4305 4305        ggml_set_name(KQ_scaled, "KQ_scaled");
 4306 4306
 4307      -      // TODO: replace with ggml_add()                         (tabs / trailing whitespace)
 4308      -      struct ggml_tensor * KQ_scaled_alibi =
 4309      -          ggml_alibi(ctx0, KQ_scaled, std::max(kv_head, n_kv - n_tokens), n_head, max_alibi_bias);
      4307 +      // TODO: replace with ggml_add()                         (spaces, whitespace cleaned)
      4308 +      struct ggml_tensor * KQ_scaled_alibi =
      4309 +          ggml_alibi(ctx0, KQ_scaled, std::max(kv_head, n_kv - n_tokens), n_head, max_alibi_bias);
 4310 4310        offload_func_kq(KQ_scaled_alibi);
 4311      -      ggml_set_name(KQ_scaled_alibi, "KQ_scaled_alibi");       (trailing whitespace)
      4311 +      ggml_set_name(KQ_scaled_alibi, "KQ_scaled_alibi");
 4312 4312
 4313 4313        struct ggml_tensor * KQ_masked = ggml_add(ctx0, KQ_scaled_alibi, KQ_mask);
 4314 4314        offload_func_kq(KQ_masked);

0 commit comments

Comments (0)