Tiny dummy models: a collection of randomly initialized tiny models for debugging/testing purposes (124 items).
This tiny model is intended for debugging. It is randomly initialized, using a configuration adapted from baidu/ERNIE-4.5-VL-424B-A47B-PT.

Example usage:
import numpy as np
import torch
from PIL import Image
from transformers import AutoModel, AutoProcessor

model_id = "yujiepan/ernie-4.5-vl-moe-tiny-random"

processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
model = AutoModel.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="cuda",
    trust_remote_code=True,
)
model.add_image_preprocess(processor)

# Any image works; the model is randomly initialized, so the output is meaningless.
image = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8), 'RGB')
inputs = processor('What is this: <|IMAGE_START|><|image@placeholder|><|IMAGE_END|>', images=[image]).to('cuda')

generated_ids = model.generate(**inputs, max_new_tokens=4, use_cache=False)
# Decode only the newly generated tokens, skipping the prompt part of the sequence.
output_text = processor.decode(generated_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=False)
print(output_text)
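Because the weights are random, the generated text is gibberish; the example only checks that the multimodal forward pass and generation run end to end. As a quick follow-up, the loaded model can be inspected to confirm how small it is (a minimal sketch reusing the model object from the snippet above; nothing model-specific is assumed):

# Count parameters of the tiny checkpoint; this should be orders of magnitude
# smaller than the 424B-parameter source model.
n_params = sum(p.numel() for p in model.parameters())
print(f"total parameters: {n_params:,}")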
The model was created with the script below: it reuses the source processor, shrinks the source configuration, and randomly initializes the weights.

import json
from pathlib import Path

import torch
from huggingface_hub import file_exists, hf_hub_download
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoProcessor,
    GenerationConfig,
)

source_model_id = "baidu/ERNIE-4.5-VL-424B-A47B-PT"
save_folder = "/tmp/yujiepan/ernie-4.5-vl-moe-tiny-random"

# Reuse the original processor (tokenizer + image preprocessing) unchanged.
processor = AutoProcessor.from_pretrained(source_model_id, trust_remote_code=True)
processor.save_pretrained(save_folder)
# Start from the original config and shrink every size-related field.
with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config_json = json.load(f)

# Point auto_map entries at the source repo so the custom modeling code is still resolved from there.
for k, v in config_json['auto_map'].items():
    config_json['auto_map'][k] = f'{source_model_id}--{v}'

config_json['hidden_size'] = 8
config_json['intermediate_size'] = 32
# config_json['head_dim'] = 32
config_json['num_attention_heads'] = 4
config_json['num_hidden_layers'] = 2
config_json['num_key_value_heads'] = 4
config_json['tie_word_embeddings'] = False
config_json['use_cache'] = True
config_json['pixel_hidden_size'] = 16
config_json['moe_layer_start_index'] = 1
config_json['moe_intermediate_size'] = [32, 32]
config_json['moe_num_experts'] = [32, 32]
config_json['vision_config']['depth'] = 2
config_json['vision_config']['embed_dim'] = 16
config_json['vision_config']['hidden_size'] = 16
config_json['vision_config']['num_heads'] = 1

with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)
config = AutoConfig.from_pretrained(save_folder, trust_remote_code=True)
print(config)

# Instantiate the model directly in bfloat16.
torch.set_default_dtype(torch.bfloat16)
model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)
torch.set_default_dtype(torch.float32)

# Copy the generation config from the source repo, if it has one.
if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
    model.generation_config = GenerationConfig.from_pretrained(
        source_model_id, trust_remote_code=True,
    )
model.generation_config.do_sample = True
print(model.generation_config)

# Randomly re-initialize all weights so the checkpoint carries nothing from the source model.
model = model.cpu()
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.1)
        print(name, p.shape)
model.save_pretrained(save_folder)
def modify_automap(path, source_model_id):
    """Prefix every auto_map entry with the source repo id so that
    trust_remote_code resolves the modeling/processing code from there."""
    import json
    with open(path, 'r', encoding='utf-8') as f:
        content = json.load(f)
    automap = {}
    if content.get('auto_map', None) is not None:
        for key, value in content.get('auto_map').items():
            if isinstance(value, str):
                value = source_model_id + '--' + value.split('--')[-1]
            else:
                value = [(source_model_id + '--' + v.split('--')[-1]) if '.' in str(v) else v for v in value]
            automap[key] = value
        with open(path, 'w', encoding='utf-8') as f:
            json.dump({**content, 'auto_map': automap}, f, indent=2)

modify_automap(f"{save_folder}/config.json", source_model_id)
modify_automap(f'{save_folder}/processor_config.json', source_model_id)
modify_automap(f'{save_folder}/preprocessor_config.json', source_model_id)
modify_automap(f'{save_folder}/tokenizer_config.json', source_model_id)

# Remove copied .py files; the remote code is loaded from the source repo via auto_map instead.
for python_file in Path(save_folder).glob('*.py'):
    python_file.unlink()
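The script above only writes the files to save_folder; the card does not show how the repository was published. A hypothetical upload step using huggingface_hub could look like the sketch below (the target repo id is taken from the usage example; the step itself is an assumption, not part of the original script):

from huggingface_hub import HfApi

# Hypothetical publishing step (not in the original script): push the prepared
# folder to the Hub under the repo id used in the usage example above.
HfApi().upload_folder(
    folder_path=save_folder,
    repo_id="yujiepan/ernie-4.5-vl-moe-tiny-random",
    repo_type="model",
)

Printing the instantiated model gives the architecture below.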
Ernie4_5_VLMoeForConditionalGeneration(
  (model): Ernie4_5_Model(
    (embed_tokens): Embedding(103424, 8)
    (layers): ModuleList(
      (0): Ernie4_5_DecoderLayer(
        (self_attn): Ernie4_5_Attention(
          (q_proj): Linear(in_features=8, out_features=8, bias=False)
          (k_proj): Linear(in_features=8, out_features=8, bias=False)
          (v_proj): Linear(in_features=8, out_features=8, bias=False)
          (o_proj): Linear(in_features=8, out_features=8, bias=False)
          (rotary_emb): RopeEmbedding()
        )
        (mlp): Ernie4_5_MLP(
          (gate_proj): Linear(in_features=8, out_features=32, bias=False)
          (up_proj): Linear(in_features=8, out_features=32, bias=False)
          (down_proj): Linear(in_features=32, out_features=8, bias=False)
        )
        (input_layernorm): RMSNorm()
        (post_attention_layernorm): RMSNorm()
        (residual_add1): FusedDropoutImpl(
          (dropout): Dropout(p=0.0, inplace=False)
        )
        (residual_add2): FusedDropoutImpl(
          (dropout): Dropout(p=0.0, inplace=False)
        )
      )
      (1): Ernie4_5_DecoderLayer(
        (self_attn): Ernie4_5_Attention(
          (q_proj): Linear(in_features=8, out_features=8, bias=False)
          (k_proj): Linear(in_features=8, out_features=8, bias=False)
          (v_proj): Linear(in_features=8, out_features=8, bias=False)
          (o_proj): Linear(in_features=8, out_features=8, bias=False)
          (rotary_emb): RopeEmbedding()
        )
        (mlp): MOEAllGatherLayerV2(
          (gate): TopKGate()
          (experts): ModuleList(
            (0-63): 64 x Ernie4_5_MoeMLP(
              (gate_proj): Linear(in_features=8, out_features=32, bias=False)
              (up_proj): Linear(in_features=8, out_features=32, bias=False)
              (down_proj): Linear(in_features=32, out_features=8, bias=False)
            )
          )
          (moe_statics): MoEStatics()
        )
        (input_layernorm): RMSNorm()
        (post_attention_layernorm): RMSNorm()
        (residual_add1): FusedDropoutImpl(
          (dropout): Dropout(p=0.0, inplace=False)
        )
        (residual_add2): FusedDropoutImpl(
          (dropout): Dropout(p=0.0, inplace=False)
        )
      )
    )
    (norm): RMSNorm()
    (resampler_model): VariableResolutionResamplerModel(
      (spatial_linear): Sequential(
        (0): Linear(in_features=64, out_features=64, bias=True)
        (1): GELU(approximate='none')
        (2): Linear(in_features=64, out_features=64, bias=True)
        (3): LayerNorm((64,), eps=1e-06, elementwise_affine=True)
      )
      (temporal_linear): Sequential(
        (0): Linear(in_features=128, out_features=64, bias=True)
        (1): GELU(approximate='none')
        (2): Linear(in_features=64, out_features=64, bias=True)
        (3): LayerNorm((64,), eps=1e-06, elementwise_affine=True)
      )
      (mlp): Linear(in_features=64, out_features=8, bias=True)
      (after_norm): RMSNorm()
    )
  )
  (lm_head): Linear(in_features=8, out_features=103424, bias=False)
  (vision_model): DFNRopeVisionTransformerPreTrainedModel(
    (patch_embed): PatchEmbed(
      (proj): Linear(in_features=588, out_features=16, bias=False)
    )
    (rotary_pos_emb): VisionRotaryEmbedding()
    (blocks): ModuleList(
      (0-1): 2 x DFNRopeVisionBlock(
        (norm1): LayerNorm((16,), eps=1e-06, elementwise_affine=True)
        (norm2): LayerNorm((16,), eps=1e-06, elementwise_affine=True)
        (attn): VisionAttention(
          (qkv): Linear(in_features=16, out_features=48, bias=True)
          (proj): Linear(in_features=16, out_features=16, bias=True)
        )
        (mlp): VisionMlp(
          (fc1): Linear(in_features=16, out_features=64, bias=True)
          (act): QuickGELUActivation()
          (fc2): Linear(in_features=64, out_features=16, bias=True)
        )
      )
    )
    (ln): LayerNorm((16,), eps=1e-06, elementwise_affine=True)
  )
)
Base model: baidu/ERNIE-4.5-VL-424B-A47B-PT