farzadab committed on
Commit
69c6e87
·
verified ·
1 Parent(s): b4f3a7b

Update code

Browse files
Files changed (4) hide show
  1. config.json +1 -78
  2. special_tokens_map.json +16 -1
  3. tokenizer.json +2 -2
  4. tokenizer_config.json +12 -1
config.json CHANGED
@@ -1,78 +1 @@
1
- {
2
- "_name_or_path": "/Users/zhuang/repos/ultravox-omni/artifacts/model-zhuang.2025-01-08-v0_5.llama3_2-1b-4a.246352f:v8",
3
- "architectures": [
4
- "UltravoxModel"
5
- ],
6
- "audio_config": {
7
- "_name_or_path": "openai/whisper-large-v3-turbo",
8
- "activation_dropout": 0.0,
9
- "activation_function": "gelu",
10
- "apply_spec_augment": false,
11
- "architectures": [
12
- "WhisperForConditionalGeneration"
13
- ],
14
- "attention_dropout": 0.0,
15
- "begin_suppress_tokens": [
16
- 220,
17
- 50256
18
- ],
19
- "bos_token_id": 50257,
20
- "d_model": 1280,
21
- "decoder_attention_heads": 20,
22
- "decoder_ffn_dim": 5120,
23
- "decoder_layerdrop": 0.0,
24
- "decoder_layers": 4,
25
- "decoder_start_token_id": 50258,
26
- "dropout": 0.0,
27
- "encoder_attention_heads": 20,
28
- "encoder_ffn_dim": 5120,
29
- "encoder_layerdrop": 0.0,
30
- "encoder_layers": 32,
31
- "eos_token_id": 50257,
32
- "init_std": 0.02,
33
- "is_encoder_decoder": true,
34
- "max_source_positions": 1500,
35
- "max_target_positions": 448,
36
- "median_filter_width": 7,
37
- "model_type": "whisper",
38
- "num_hidden_layers": 32,
39
- "num_mel_bins": 128,
40
- "pad_token_id": 50257,
41
- "scale_embedding": false,
42
- "torch_dtype": "float16",
43
- "use_cache": true,
44
- "vocab_size": 51866
45
- },
46
- "audio_latency_block_size": null,
47
- "audio_model_id": null,
48
- "auto_map": {
49
- "AutoConfig": "ultravox_config.UltravoxConfig",
50
- "AutoModel": "ultravox_model.UltravoxModel",
51
- "AutoProcessor": "ultravox_processing.UltravoxProcessor"
52
- },
53
- "custom_pipelines": {
54
- "ultravox-pipeline": {
55
- "impl": "ultravox_pipeline.UltravoxPipeline",
56
- "pt": [
57
- "AutoModel"
58
- ],
59
- "tf": [],
60
- "type": "multimodal"
61
- }
62
- },
63
- "hidden_size": 4096,
64
- "ignore_index": -100,
65
- "initializer_range": 0.02,
66
- "model_type": "ultravox",
67
- "norm_init": 0.4,
68
- "pad_token_id": 128009,
69
- "projector_act": "swiglu",
70
- "projector_ln_mid": true,
71
- "stack_factor": 8,
72
- "text_model_id": "meta-llama/Llama-3.2-1B-Instruct",
73
- "torch_dtype": "bfloat16",
74
- "transformers_version": "4.48.1",
75
- "num_attention_heads": 32,
76
- "num_hidden_layers": 16,
77
- "vocab_size": 128256
78
- }
 
1
+ {"_name_or_path": "/Users/zhuang/repos/ultravox-omni/artifacts/model-zhuang.2025-01-08-v0_5.llama3_2-1b-4a.246352f:v8", "architectures": ["UltravoxModel"], "audio_config": {"_name_or_path": "openai/whisper-large-v3-turbo", "activation_dropout": 0.0, "activation_function": "gelu", "apply_spec_augment": false, "architectures": ["WhisperForConditionalGeneration"], "attention_dropout": 0.0, "begin_suppress_tokens": [220, 50256], "bos_token_id": 50257, "d_model": 1280, "decoder_attention_heads": 20, "decoder_ffn_dim": 5120, "decoder_layerdrop": 0.0, "decoder_layers": 4, "decoder_start_token_id": 50258, "dropout": 0.0, "encoder_attention_heads": 20, "encoder_ffn_dim": 5120, "encoder_layerdrop": 0.0, "encoder_layers": 32, "eos_token_id": 50257, "init_std": 0.02, "is_encoder_decoder": true, "max_source_positions": 1500, "max_target_positions": 448, "median_filter_width": 7, "model_type": "whisper", "num_hidden_layers": 32, "num_mel_bins": 128, "pad_token_id": 50257, "scale_embedding": false, "torch_dtype": "float16", "use_cache": true, "vocab_size": 51866}, "audio_latency_block_size": null, "audio_model_id": null, "auto_map": {"AutoConfig": "ultravox_config.UltravoxConfig", "AutoModel": "ultravox_model.UltravoxModel", "AutoProcessor": "ultravox_processing.UltravoxProcessor"}, "custom_pipelines": {"ultravox-pipeline": {"impl": "ultravox_pipeline.UltravoxPipeline", "pt": ["AutoModel"], "tf": [], "type": "multimodal"}}, "hidden_size": 4096, "ignore_index": -100, "initializer_range": 0.02, "model_type": "ultravox", "norm_init": 0.4, "pad_token_id": 128009, "projector_act": "swiglu", "projector_ln_mid": true, "stack_factor": 8, "text_model_id": "meta-llama/Llama-3.2-1B-Instruct", "torch_dtype": "bfloat16", "transformers_version": "4.48.1", "num_attention_heads": 32, "num_hidden_layers": 16, "vocab_size": 128256, "audio_token_index": 128256}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
special_tokens_map.json CHANGED
@@ -1,4 +1,13 @@
1
  {
 
 
 
 
 
 
 
 
 
2
  "bos_token": {
3
  "content": "<|begin_of_text|>",
4
  "lstrip": false,
@@ -13,5 +22,11 @@
13
  "rstrip": false,
14
  "single_word": false
15
  },
16
- "pad_token": "<|eot_id|>"
 
 
 
 
 
 
17
  }
 
1
  {
2
+ "additional_special_tokens": [
3
+ {
4
+ "content": "<|audio|>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false
9
+ }
10
+ ],
11
  "bos_token": {
12
  "content": "<|begin_of_text|>",
13
  "lstrip": false,
 
22
  "rstrip": false,
23
  "single_word": false
24
  },
25
+ "pad_token": {
26
+ "content": "<|eot_id|>",
27
+ "lstrip": false,
28
+ "normalized": false,
29
+ "rstrip": false,
30
+ "single_word": false
31
+ }
32
  }
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b
3
- size 17209920
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e87771bfea74770d36a1432b5f9ba6d7f611d33fb63681d10c2479540c1591f8
3
+ size 17210106
tokenizer_config.json CHANGED
@@ -2047,8 +2047,19 @@
2047
  "rstrip": false,
2048
  "single_word": false,
2049
  "special": true
 
 
 
 
 
 
 
 
2050
  }
2051
  },
 
 
 
2052
  "bos_token": "<|begin_of_text|>",
2053
  "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- if strftime_now is defined %}\n {%- set date_string = strftime_now(\"%d %b %Y\") %}\n {%- else %}\n {%- set date_string = \"26 Jul 2024\" %}\n {%- endif %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \"}\" }}\n {{- \"<|eot_id|>\" }}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n",
2054
  "clean_up_tokenization_spaces": true,
@@ -2060,5 +2071,5 @@
2060
  ],
2061
  "model_max_length": 131072,
2062
  "pad_token": "<|eot_id|>",
2063
- "tokenizer_class": "PreTrainedTokenizerFast"
2064
  }
 
2047
  "rstrip": false,
2048
  "single_word": false,
2049
  "special": true
2050
+ },
2051
+ "128256": {
2052
+ "content": "<|audio|>",
2053
+ "lstrip": false,
2054
+ "normalized": false,
2055
+ "rstrip": false,
2056
+ "single_word": false,
2057
+ "special": true
2058
  }
2059
  },
2060
+ "additional_special_tokens": [
2061
+ "<|audio|>"
2062
+ ],
2063
  "bos_token": "<|begin_of_text|>",
2064
  "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- if strftime_now is defined %}\n {%- set date_string = strftime_now(\"%d %b %Y\") %}\n {%- else %}\n {%- set date_string = \"26 Jul 2024\" %}\n {%- endif %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \"}\" }}\n {{- \"<|eot_id|>\" }}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n",
2065
  "clean_up_tokenization_spaces": true,
 
2071
  ],
2072
  "model_max_length": 131072,
2073
  "pad_token": "<|eot_id|>",
2074
+ "tokenizer_class": "PreTrainedTokenizer"
2075
  }