Skip to content

Commit 09b5ffc

Browse files
authored
Merge pull request #175 from VE-FORBRYDERNE/gptj-patch
Fix GPT-J model loading in TPU Colab when `vocab_size` is not divisible by 8
2 parents 7b5a766 + b20d80c commit 09b5ffc

File tree

2 files changed

+3
-3
lines changed

2 files changed

+3
-3
lines changed

maps/gptj.json

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,11 +9,11 @@
99
},
1010
"static_weights": {
1111
"transformer.wte.weight": {"mtj": {"module": "embedding_shard/~/linear", "param": "w", "transforms": ["no_transpose", "vocab_pad"]}},
-        "transformer.wte.bias": {"mtj": {"module": "embedding_shard/~/linear", "param": "b"}},
+        "transformer.wte.bias": {"mtj": {"module": "embedding_shard/~/linear", "param": "b", "transforms": ["vocab_pad"]}},
1313
"transformer.ln_f.weight": {"mtj": {"module": "projection_shard/~/replicated_layer_norm", "param": "scale"}},
1414
"transformer.ln_f.bias": {"mtj": {"module": "projection_shard/~/replicated_layer_norm", "param": "offset"}},
1515
"lm_head.weight": {"mtj": {"module": "projection_shard/~/linear", "param": "w", "transforms": ["vocab_pad"]}},
-        "lm_head.bias": {"mtj": {"module": "projection_shard/~/linear", "param": "b"}}
+        "lm_head.bias": {"mtj": {"module": "projection_shard/~/linear", "param": "b", "transforms": ["vocab_pad"]}}
1717
},
1818
"layer_weights": {
1919
"transformer.h.{layer}.attn.bias": {},

tpu_mtj_backend.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1304,7 +1304,7 @@ def callback(model_dict, f, **_):
13041304
if "divide_by_shards" in transforms:
13051305
tensor /= params["cores_per_replica"]
13061306
if "vocab_pad" in transforms:
-                    tensor = torch.nn.functional.pad(tensor, (0, 0, 0, params["n_vocab_padding"]))
+                    tensor = torch.nn.functional.pad(tensor, (0,) * (tensor.ndim * 2 - 1) + (params["n_vocab_padding"],))
13081308
if "no_transpose" not in transforms and tensor.ndim == 2:
13091309
tensor = tensor.T
13101310
tensor.unsqueeze_(0)

0 commit comments

Comments
 (0)