Update modeling_motif.py
Browse files

Commit message: scale_emb, norm_alpha remove
- modeling_motif.py +1 -4
modeling_motif.py
CHANGED
@@ -1065,9 +1065,6 @@ class MotifModel(MotifPreTrainedModel):
 1065         self.gradient_checkpointing = False
 1066         self.post_init()
 1067
-1068         self.scale_emb = 1
-1069         self.norm_alpha = 1
-1070
 1071     def get_input_embeddings(self):
 1072         return self.embed_tokens
 1073
@@ -1120,7 +1117,7 @@ class MotifModel(MotifPreTrainedModel):
 1120             "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)")
 1121
 1122         if inputs_embeds is None:
-1123             inputs_embeds = self.embed_tokens(input_ids)
+1123             inputs_embeds = self.embed_tokens(input_ids)

(NOTE(review): the pasted page shows the removed and added lines as identical text. Given the diff stat "+1 -4" and the commit message "scale_emb, norm_alpha remove", the removed line presumably carried an extra factor such as `* self.scale_emb` that was lost in the copy-paste — verify against the repository.)

 1125         if cache_position is None:
 1126             past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
|
|
Resulting file state after the change (new line numbers):

 1065         self.gradient_checkpointing = False
 1066         self.post_init()
 1067
 1068     def get_input_embeddings(self):
 1069         return self.embed_tokens
 1070

 ...

 1117             "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)")
 1118
 1119         if inputs_embeds is None:
+1120             inputs_embeds = self.embed_tokens(input_ids)
 1121
 1122         if cache_position is None:
 1123             past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0