spatial-8act / config.json.back.20250403_125446
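Backup of the model's `config.json`: an OpenVLA-style vision-language-action checkpoint (Llama-2-7b language backbone, fused DINOv2 + SigLIP vision backbone at 224px) carrying action/proprio normalization statistics for `libero_spatial_no_noops`, computed over 432 trajectories and 52,970 transitions. Two usage sketches follow the JSON below.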
{
"norm_stats": {
"libero_spatial_no_noops": {
"action": {
"mean": [
0.15312479436397552,
0.13707277178764343,
-0.15526802837848663,
-0.005176450591534376,
-0.01120874285697937,
-0.020194264128804207,
0.4578818082809448
],
"std": [
0.41272708773612976,
0.34724321961402893,
0.50869220495224,
0.037266165018081665,
0.07244449853897095,
0.05762382969260216,
0.49827873706817627
],
"max": [
0.9375,
0.9375,
0.9375,
0.1971428543329239,
0.33642858266830444,
0.375,
1.0
],
"min": [
-0.9375,
-0.9375,
-0.9375,
-0.1875,
-0.3675000071525574,
-0.36000001430511475,
0.0
],
"q01": [
-0.7454732114076613,
-0.6616071462631226,
-0.9375,
-0.1071428582072258,
-0.20678570866584778,
-0.1842857152223587,
0.0
],
"q99": [
0.9375,
0.8758928775787354,
0.9321428537368774,
0.1039285734295845,
0.17678570747375488,
0.14571428298950195,
1.0
],
"mask": [
true,
true,
true,
true,
true,
true,
false
]
},
"proprio": {
"mean": [
-0.024462558329105377,
0.106529600918293,
1.0580483675003052,
3.0628468990325928,
-0.10464039444923401,
0.08307311683893204,
0.01995457336306572,
-0.020162804052233696
],
"std": [
0.1101478561758995,
0.13784688711166382,
0.1044282391667366,
0.10451053828001022,
0.4112098217010498,
0.2176690548658371,
0.017260896041989326,
0.0171116404235363
],
"max": [
0.1759040206670761,
0.3904820382595062,
1.3290715217590332,
3.4566118717193604,
1.2268599271774292,
1.0429412126541138,
0.041053611785173416,
0.000775813648942858
],
"min": [
-0.3095473051071167,
-0.29250794649124146,
0.9095591306686401,
2.497488260269165,
-1.8006486892700195,
-0.7207611203193665,
-0.0004703797458205372,
-0.041536275297403336
],
"q01": [
-0.2727657300233841,
-0.23721413239836692,
0.9160063165426254,
2.77949666261673,
-1.3187511622905732,
-0.41989982962608335,
0.001503719249740243,
-0.03989770736545324
],
"q99": [
0.13529365032911292,
0.3629165390133857,
1.2862326657772063,
3.2829698753356933,
0.9332760351896285,
0.6325724506378171,
0.039933966137468815,
-0.001671919699292631
]
},
"num_transitions": 52970,
"num_trajectories": 432
}
},
"n_action_bins": 256,
"vision_backbone_id": "dinosiglip-vit-so-224px",
"llm_backbone_id": "llama2-7b-pure",
"arch_specifier": "no-align+fused-gelu-mlp",
"output_projector_states": false,
"use_fused_vision_backbone": true,
"timm_model_ids": [
"vit_large_patch14_reg4_dinov2.lvd142m",
"vit_so400m_patch14_siglip_224"
],
"timm_override_act_layers": [
null,
null
],
"image_sizes": [
224,
224
],
"image_resize_strategy": "resize-naive",
"hf_llm_id": "meta-llama/Llama-2-7b-hf",
"llm_max_length": 2048,
"pad_token_id": 32000,
"pad_to_multiple_of": 64,
"text_config": {
"vocab_size": 32064,
"max_position_embeddings": 2048,
"hidden_size": 4096,
"intermediate_size": 11008,
"num_hidden_layers": 32,
"num_attention_heads": 32,
"num_key_value_heads": 32,
"hidden_act": "silu",
"initializer_range": 0.02,
"rms_norm_eps": 1e-06,
"pretraining_tp": 1,
"use_cache": true,
"rope_theta": 10000.0,
"rope_scaling": null,
"attention_bias": false,
"attention_dropout": 0.0,
"return_dict": true,
"output_hidden_states": false,
"output_attentions": false,
"torchscript": false,
"torch_dtype": "bfloat16",
"use_bfloat16": false,
"tf_legacy_loss": false,
"pruned_heads": {},
"tie_word_embeddings": false,
"chunk_size_feed_forward": 0,
"is_encoder_decoder": false,
"is_decoder": false,
"cross_attention_hidden_size": null,
"add_cross_attention": false,
"tie_encoder_decoder": false,
"max_length": 20,
"min_length": 0,
"do_sample": false,
"early_stopping": false,
"num_beams": 1,
"num_beam_groups": 1,
"diversity_penalty": 0.0,
"temperature": 1.0,
"top_k": 50,
"top_p": 1.0,
"typical_p": 1.0,
"repetition_penalty": 1.0,
"length_penalty": 1.0,
"no_repeat_ngram_size": 0,
"encoder_no_repeat_ngram_size": 0,
"bad_words_ids": null,
"num_return_sequences": 1,
"output_scores": false,
"return_dict_in_generate": false,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"remove_invalid_values": false,
"exponential_decay_length_penalty": null,
"suppress_tokens": null,
"begin_suppress_tokens": null,
"architectures": null,
"finetuning_task": null,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"tokenizer_class": null,
"prefix": null,
"bos_token_id": 1,
"pad_token_id": 32000,
"eos_token_id": 2,
"sep_token_id": null,
"decoder_start_token_id": null,
"task_specific_params": null,
"problem_type": null,
"_name_or_path": "",
"model_type": "llama"
},
"return_dict": true,
"output_hidden_states": false,
"output_attentions": false,
"torchscript": false,
"torch_dtype": "bfloat16",
"use_bfloat16": false,
"tf_legacy_loss": false,
"pruned_heads": {},
"tie_word_embeddings": true,
"chunk_size_feed_forward": 0,
"is_encoder_decoder": false,
"is_decoder": false,
"cross_attention_hidden_size": null,
"add_cross_attention": false,
"tie_encoder_decoder": false,
"max_length": 20,
"min_length": 0,
"do_sample": false,
"early_stopping": false,
"num_beams": 1,
"num_beam_groups": 1,
"diversity_penalty": 0.0,
"temperature": 1.0,
"top_k": 50,
"top_p": 1.0,
"typical_p": 1.0,
"repetition_penalty": 1.0,
"length_penalty": 1.0,
"no_repeat_ngram_size": 0,
"encoder_no_repeat_ngram_size": 0,
"bad_words_ids": null,
"num_return_sequences": 1,
"output_scores": false,
"return_dict_in_generate": false,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"remove_invalid_values": false,
"exponential_decay_length_penalty": null,
"suppress_tokens": null,
"begin_suppress_tokens": null,
"architectures": [
"OpenVLAForActionPrediction"
],
"finetuning_task": null,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"tokenizer_class": null,
"prefix": null,
"bos_token_id": null,
"eos_token_id": null,
"sep_token_id": null,
"decoder_start_token_id": null,
"task_specific_params": null,
"problem_type": null,
"_name_or_path": "/home/user1/.cache/huggingface/hub/models--openvla--openvla-7b/snapshots/31f090d05236101ebfc381b61c674dd4746d4ce0",
"transformers_version": "4.40.1",
"auto_map": {
"AutoConfig": "configuration_prismatic.OpenVLAConfig",
"AutoModelForVision2Seq": "modeling_prismatic.OpenVLAForActionPrediction"
},
"model_type": "openvla"
}
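For reference, the `norm_stats` block at the top is what OpenVLA-style checkpoints use to map model outputs back into the robot's action space. Below is a minimal sketch of that de-normalization, assuming the scheme from the upstream OpenVLA release: predictions in [-1, 1] are rescaled per dimension through the `q01`/`q99` bounds, and `mask` exempts dimensions that were never normalized (here the last, gripper, dimension).

```python
# Minimal sketch of OpenVLA-style action de-normalization using the
# "norm_stats" block above. Follows the quantile-bounded [-1, 1] scheme
# from the upstream OpenVLA code; treat it as illustrative, not as this
# repo's exact implementation.
import json

import numpy as np

with open("config.json.back.20250403_125446") as f:
    config = json.load(f)

stats = config["norm_stats"]["libero_spatial_no_noops"]["action"]
q01 = np.array(stats["q01"])   # per-dimension 1st percentile
q99 = np.array(stats["q99"])   # per-dimension 99th percentile
# mask marks which dims were normalized during training; the gripper dim
# (last entry, false) is passed through unchanged.
mask = np.array(stats["mask"], dtype=bool)

def unnormalize(normalized_action: np.ndarray) -> np.ndarray:
    """Map a model output in [-1, 1] back to the robot's action space."""
    raw = 0.5 * (normalized_action + 1.0) * (q99 - q01) + q01
    return np.where(mask, raw, normalized_action)

# A centered prediction lands midway between the 1st/99th percentiles
# on the six masked dims, and stays 0.0 on the gripper dim:
print(unnormalize(np.zeros(7)))
```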
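The `auto_map` entries register `OpenVLAForActionPrediction` under `AutoModelForVision2Seq`, so the checkpoint loads with `trust_remote_code=True`, and `predict_action`'s `unnorm_key` selects the `norm_stats` entry above. A usage sketch following the upstream OpenVLA example; the repo id, image path, and task instruction are placeholders:

```python
# Sketch of loading this checkpoint via the auto_map above. Placeholders:
# MODEL_PATH (actual hub id or local directory), frame.png (camera frame),
# and the task instruction in the prompt.
import torch
from PIL import Image
from transformers import AutoModelForVision2Seq, AutoProcessor

MODEL_PATH = "juyil/spatial-8act"  # placeholder: substitute the real path

processor = AutoProcessor.from_pretrained(MODEL_PATH, trust_remote_code=True)
vla = AutoModelForVision2Seq.from_pretrained(
    MODEL_PATH, torch_dtype=torch.bfloat16, trust_remote_code=True
).to("cuda:0")

image = Image.open("frame.png")  # current third-person camera frame
prompt = "In: What action should the robot take to pick up the black bowl?\nOut:"

inputs = processor(prompt, image).to("cuda:0", dtype=torch.bfloat16)
# predict_action decodes the discrete action tokens and de-normalizes them
# with the stats stored under unnorm_key -- the LIBERO-Spatial entry above.
action = vla.predict_action(
    **inputs, unnorm_key="libero_spatial_no_noops", do_sample=False
)
print(action)  # 7-DoF action: xyz delta, rotation delta, gripper
```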