Joctor committed on
Commit d206f76
1 Parent(s): fb6a006

End of training

adapter_config.json CHANGED
@@ -20,8 +20,8 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "v_proj",
- "q_proj"
+ "q_proj",
+ "v_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:26744900c060e7e706c9e90367e738ce6060ebf2f262888bedc9907a053f2cc2
+ oid sha256:133177ff742b43b1823adec737a083036212553f0ad4df144d15c60724df594c
  size 19957360
runs/May16_07-25-42_3064694ee8b3/events.out.tfevents.1715844342.3064694ee8b3.24.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c7ea4bd2c2c3c0c87d3c5c82a865d4677987918a8f24c8b54c91bcb175f1e8a
+ size 15032
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d32ed4389cb7bb0f25f2133648f8dafc6556bf91207714235dd650c61b42ff29
+ oid sha256:9364262c088c264aae360de9c33cf7585dafb20dc6aa38a81b1c728c3320d192
  size 4920
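training_args.bin is a torch-pickled TrainingArguments object, so the new hash simply reflects the arguments of the second run. A sketch of how it can be inspected, assuming the file has been downloaded locally:

    # Sketch: load the pickled TrainingArguments and inspect a few fields.
    import torch

    args = torch.load("training_args.bin", weights_only=False)  # full unpickling is required here
    print(args.learning_rate, args.num_train_epochs, args.lr_scheduler_type)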
wandb/debug-internal.log CHANGED
The diff for this file is too large to render. See raw diff
 
wandb/debug.log CHANGED
@@ -1,50 +1,30 @@
1
- 2024-05-16 07:00:49,430 INFO MainThread:34 [wandb_setup.py:_flush():76] Current SDK version is 0.16.6
2
- 2024-05-16 07:00:49,431 INFO MainThread:34 [wandb_setup.py:_flush():76] Configure stats pid to 34
3
- 2024-05-16 07:00:49,431 INFO MainThread:34 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
4
- 2024-05-16 07:00:49,431 INFO MainThread:34 [wandb_setup.py:_flush():76] Loading settings from /kaggle/working/wandb/settings
5
- 2024-05-16 07:00:49,431 INFO MainThread:34 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
6
- 2024-05-16 07:00:49,431 INFO MainThread:34 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program': '<python with no main file>'}
7
- 2024-05-16 07:00:49,431 INFO MainThread:34 [wandb_setup.py:_flush():76] Applying login settings: {'api_key': '***REDACTED***'}
8
- 2024-05-16 07:00:49,431 INFO MainThread:34 [wandb_setup.py:_flush():76] Applying login settings: {'api_key': '***REDACTED***'}
9
- 2024-05-16 07:00:49,431 INFO MainThread:34 [wandb_setup.py:_flush():76] Applying login settings: {}
10
- 2024-05-16 07:00:49,431 INFO MainThread:34 [wandb_init.py:_log_setup():521] Logging user logs to /kaggle/working/wandb/run-20240516_070049-8ecq2cdf/logs/debug.log
11
- 2024-05-16 07:00:49,431 INFO MainThread:34 [wandb_init.py:_log_setup():522] Logging internal logs to /kaggle/working/wandb/run-20240516_070049-8ecq2cdf/logs/debug-internal.log
12
- 2024-05-16 07:00:49,431 INFO MainThread:34 [wandb_init.py:_jupyter_setup():467] configuring jupyter hooks <wandb.sdk.wandb_init._WandbInit object at 0x7f2ce0184cd0>
13
- 2024-05-16 07:00:49,432 INFO MainThread:34 [wandb_init.py:init():561] calling init triggers
14
- 2024-05-16 07:00:49,432 INFO MainThread:34 [wandb_init.py:init():568] wandb.init called with sweep_config: {}
15
  config: {}
16
- 2024-05-16 07:00:49,432 INFO MainThread:34 [wandb_init.py:init():611] starting backend
17
- 2024-05-16 07:00:49,432 INFO MainThread:34 [wandb_init.py:init():615] setting up manager
18
- 2024-05-16 07:00:49,434 INFO MainThread:34 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
19
- 2024-05-16 07:00:49,438 INFO MainThread:34 [wandb_init.py:init():623] backend started and connected
20
- 2024-05-16 07:00:49,449 INFO MainThread:34 [wandb_run.py:_label_probe_notebook():1299] probe notebook
21
- 2024-05-16 07:00:49,786 INFO MainThread:34 [wandb_init.py:init():715] updated telemetry
22
- 2024-05-16 07:00:49,791 INFO MainThread:34 [wandb_init.py:init():748] communicating run to backend with 90.0 second timeout
23
- 2024-05-16 07:00:49,943 INFO MainThread:34 [wandb_run.py:_on_init():2357] communicating current version
24
- 2024-05-16 07:00:50,031 INFO MainThread:34 [wandb_run.py:_on_init():2366] got version response upgrade_message: "wandb version 0.17.0 is available! To upgrade, please run:\n $ pip install wandb --upgrade"
25
 
26
- 2024-05-16 07:00:50,033 INFO MainThread:34 [wandb_init.py:init():799] starting run threads in backend
27
- 2024-05-16 07:01:06,114 INFO MainThread:34 [wandb_run.py:_console_start():2335] atexit reg
28
- 2024-05-16 07:01:06,114 INFO MainThread:34 [wandb_run.py:_redirect():2190] redirect: wrap_raw
29
- 2024-05-16 07:01:06,114 INFO MainThread:34 [wandb_run.py:_redirect():2255] Wrapping output streams.
30
- 2024-05-16 07:01:06,114 INFO MainThread:34 [wandb_run.py:_redirect():2280] Redirects installed.
31
- 2024-05-16 07:01:06,116 INFO MainThread:34 [wandb_init.py:init():842] run started, returning control to user process
32
- 2024-05-16 07:01:06,122 INFO MainThread:34 [wandb_run.py:_config_callback():1347] config_cb None None {'ignore_index': -100, 'image_token_index': 32000, 'projector_hidden_act': 'gelu', 'vision_feature_select_strategy': 'default', 'vision_feature_layer': -2, 'vision_config': {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': None, 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': None, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': '', 'model_type': 'clip_vision_model', 'vocab_size': 32000, 'hidden_size': 1024, 'intermediate_size': 4096, 'projection_dim': 768, 'num_hidden_layers': 24, 'num_attention_heads': 16, 'num_channels': 3, 'patch_size': 14, 'image_size': 336, 'initializer_range': 0.02, 'initializer_factor': 1.0, 'attention_dropout': 0.0, 'layer_norm_eps': 1e-05, 'hidden_act': 'quick_gelu'}, 'text_config': {'vocab_size': 32064, 'max_position_embeddings': 4096, 'hidden_size': 4096, 'intermediate_size': 11008, 'num_hidden_layers': 32, 'num_attention_heads': 32, 'num_key_value_heads': 32, 'hidden_act': 'silu', 'initializer_range': 0.02, 'rms_norm_eps': 1e-05, 'pretraining_tp': 1, 'use_cache': True, 'rope_theta': 10000.0, 'rope_scaling': None, 'attention_bias': False, 'attention_dropout': 0.0, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': False, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['LlamaForCausalLM'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 
'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': None, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'lmsys/vicuna-7b-v1.5', 'model_type': 'llama'}, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'bfloat16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': False, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['LlavaForConditionalGeneration'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': 32001, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'llava-hf/llava-1.5-7b-hf', 'transformers_version': '4.39.3', 'model_type': 'llava', 'quantization_config': {'quant_method': 'QuantizationMethod.BITS_AND_BYTES', '_load_in_8bit': False, '_load_in_4bit': True, 'llm_int8_threshold': 6.0, 'llm_int8_skip_modules': None, 'llm_int8_enable_fp32_cpu_offload': False, 'llm_int8_has_fp16_weight': False, 'bnb_4bit_quant_type': 'nf4', 'bnb_4bit_use_double_quant': True, 'bnb_4bit_compute_dtype': 'bfloat16', 'bnb_4bit_quant_storage': 'uint8', 'load_in_4bit': True, 'load_in_8bit': False}, 'output_dir': './', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': False, 'do_predict': False, 'evaluation_strategy': 'no', 'prediction_loss_only': False, 'per_device_train_batch_size': 2, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 4, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 0.0002, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 1, 'max_steps': -1, 'lr_scheduler_type': 'constant', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': './runs/May16_07-00-48_cc8faff11463', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 1, 'logging_nan_inf_filter': True, 'save_strategy': 'epoch', 'save_steps': 500, 'save_total_limit': 3, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 
'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': None, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': './', 'disable_tqdm': False, 'remove_unused_columns': False, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_bnb_8bit', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': False, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None}
33
- 2024-05-16 07:16:16,067 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
34
- 2024-05-16 07:16:16,068 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
35
- 2024-05-16 07:16:16,075 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
36
- 2024-05-16 07:16:18,184 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
37
- 2024-05-16 07:16:18,184 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
38
- 2024-05-16 07:18:56,636 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
39
- 2024-05-16 07:18:56,684 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
40
- 2024-05-16 07:18:56,684 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
41
- 2024-05-16 07:19:43,460 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
42
- 2024-05-16 07:19:43,507 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
43
- 2024-05-16 07:19:43,507 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
44
- 2024-05-16 07:19:54,270 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
45
- 2024-05-16 07:19:54,320 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
46
- 2024-05-16 07:19:54,320 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
47
- 2024-05-16 07:20:06,268 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
48
- 2024-05-16 07:20:06,319 INFO MainThread:34 [jupyter.py:save_ipynb():373] not saving jupyter notebook
49
- 2024-05-16 07:20:06,319 INFO MainThread:34 [wandb_init.py:_pause_backend():432] pausing backend
50
- 2024-05-16 07:20:19,142 INFO MainThread:34 [wandb_init.py:_resume_backend():437] resuming backend
 
1
+ 2024-05-16 07:25:42,952 INFO MainThread:24 [wandb_setup.py:_flush():76] Current SDK version is 0.16.6
2
+ 2024-05-16 07:25:42,952 INFO MainThread:24 [wandb_setup.py:_flush():76] Configure stats pid to 24
3
+ 2024-05-16 07:25:42,952 INFO MainThread:24 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
4
+ 2024-05-16 07:25:42,952 INFO MainThread:24 [wandb_setup.py:_flush():76] Loading settings from /kaggle/working/wandb/settings
5
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
6
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program': '<python with no main file>'}
7
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_setup.py:_flush():76] Applying login settings: {'api_key': '***REDACTED***'}
8
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_setup.py:_flush():76] Applying login settings: {'api_key': '***REDACTED***'}
9
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_setup.py:_flush():76] Applying login settings: {}
10
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_init.py:_log_setup():521] Logging user logs to /kaggle/working/wandb/run-20240516_072542-5ru1r69k/logs/debug.log
11
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_init.py:_log_setup():522] Logging internal logs to /kaggle/working/wandb/run-20240516_072542-5ru1r69k/logs/debug-internal.log
12
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_init.py:init():561] calling init triggers
13
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_init.py:init():568] wandb.init called with sweep_config: {}
 
14
  config: {}
15
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_init.py:init():611] starting backend
16
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_init.py:init():615] setting up manager
17
+ 2024-05-16 07:25:42,955 INFO MainThread:24 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
18
+ 2024-05-16 07:25:42,958 INFO MainThread:24 [wandb_init.py:init():623] backend started and connected
19
+ 2024-05-16 07:25:42,962 INFO MainThread:24 [wandb_init.py:init():715] updated telemetry
20
+ 2024-05-16 07:25:42,967 INFO MainThread:24 [wandb_init.py:init():748] communicating run to backend with 90.0 second timeout
21
+ 2024-05-16 07:25:43,132 INFO MainThread:24 [wandb_run.py:_on_init():2357] communicating current version
22
+ 2024-05-16 07:25:43,218 INFO MainThread:24 [wandb_run.py:_on_init():2366] got version response upgrade_message: "wandb version 0.17.0 is available! To upgrade, please run:\n $ pip install wandb --upgrade"
 
23
 
24
+ 2024-05-16 07:25:43,220 INFO MainThread:24 [wandb_init.py:init():799] starting run threads in backend
25
+ 2024-05-16 07:25:59,293 INFO MainThread:24 [wandb_run.py:_console_start():2335] atexit reg
26
+ 2024-05-16 07:25:59,293 INFO MainThread:24 [wandb_run.py:_redirect():2190] redirect: wrap_raw
27
+ 2024-05-16 07:25:59,293 INFO MainThread:24 [wandb_run.py:_redirect():2255] Wrapping output streams.
28
+ 2024-05-16 07:25:59,294 INFO MainThread:24 [wandb_run.py:_redirect():2280] Redirects installed.
29
+ 2024-05-16 07:25:59,295 INFO MainThread:24 [wandb_init.py:init():842] run started, returning control to user process
30
+ 2024-05-16 07:25:59,302 INFO MainThread:24 [wandb_run.py:_config_callback():1347] config_cb None None {'ignore_index': -100, 'image_token_index': 32000, 'projector_hidden_act': 'gelu', 'vision_feature_select_strategy': 'default', 'vision_feature_layer': -2, 'vision_config': {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': None, 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': None, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': '', 'model_type': 'clip_vision_model', 'vocab_size': 32000, 'hidden_size': 1024, 'intermediate_size': 4096, 'projection_dim': 768, 'num_hidden_layers': 24, 'num_attention_heads': 16, 'num_channels': 3, 'patch_size': 14, 'image_size': 336, 'initializer_range': 0.02, 'initializer_factor': 1.0, 'attention_dropout': 0.0, 'layer_norm_eps': 1e-05, 'hidden_act': 'quick_gelu'}, 'text_config': {'vocab_size': 32064, 'max_position_embeddings': 4096, 'hidden_size': 4096, 'intermediate_size': 11008, 'num_hidden_layers': 32, 'num_attention_heads': 32, 'num_key_value_heads': 32, 'hidden_act': 'silu', 'initializer_range': 0.02, 'rms_norm_eps': 1e-05, 'pretraining_tp': 1, 'use_cache': True, 'rope_theta': 10000.0, 'rope_scaling': None, 'attention_bias': False, 'attention_dropout': 0.0, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': False, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['LlamaForCausalLM'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 
'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': None, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'lmsys/vicuna-7b-v1.5', 'model_type': 'llama'}, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'bfloat16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': False, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['LlavaForConditionalGeneration'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': 32001, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'llava-hf/llava-1.5-7b-hf', 'transformers_version': '4.39.3', 'model_type': 'llava', 'quantization_config': {'quant_method': 'QuantizationMethod.BITS_AND_BYTES', '_load_in_8bit': False, '_load_in_4bit': True, 'llm_int8_threshold': 6.0, 'llm_int8_skip_modules': None, 'llm_int8_enable_fp32_cpu_offload': False, 'llm_int8_has_fp16_weight': False, 'bnb_4bit_quant_type': 'nf4', 'bnb_4bit_use_double_quant': True, 'bnb_4bit_compute_dtype': 'bfloat16', 'bnb_4bit_quant_storage': 'uint8', 'load_in_4bit': True, 'load_in_8bit': False}, 'output_dir': './', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': False, 'do_predict': False, 'evaluation_strategy': 'no', 'prediction_loss_only': False, 'per_device_train_batch_size': 2, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 4, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 0.0002, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 1, 'max_steps': -1, 'lr_scheduler_type': 'constant', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': './runs/May16_07-25-42_3064694ee8b3', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 1, 'logging_nan_inf_filter': True, 'save_strategy': 'epoch', 'save_steps': 500, 'save_total_limit': 3, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 
'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': None, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': './', 'disable_tqdm': False, 'remove_unused_columns': False, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_bnb_8bit', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': False, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None}
wandb/run-20240516_072542-5ru1r69k/files/conda-environment.yaml ADDED
File without changes
wandb/run-20240516_072542-5ru1r69k/files/config.yaml ADDED
@@ -0,0 +1,808 @@
1
+ wandb_version: 1
2
+
3
+ _wandb:
4
+ desc: null
5
+ value:
6
+ python_version: 3.10.13
7
+ cli_version: 0.16.6
8
+ framework: huggingface
9
+ huggingface_version: 4.39.3
10
+ is_jupyter_run: false
11
+ is_kaggle_kernel: true
12
+ start_time: 1715844342.0
13
+ t:
14
+ 1:
15
+ - 1
16
+ - 2
17
+ - 3
18
+ - 5
19
+ - 11
20
+ - 12
21
+ - 49
22
+ - 51
23
+ - 53
24
+ - 55
25
+ - 71
26
+ - 84
27
+ - 98
28
+ - 105
29
+ 2:
30
+ - 1
31
+ - 2
32
+ - 3
33
+ - 5
34
+ - 11
35
+ - 12
36
+ - 49
37
+ - 51
38
+ - 53
39
+ - 55
40
+ - 71
41
+ - 84
42
+ - 98
43
+ - 105
44
+ 3:
45
+ - 7
46
+ - 23
47
+ 4: 3.10.13
48
+ 5: 0.16.6
49
+ 6: 4.39.3
50
+ 8:
51
+ - 2
52
+ - 5
53
+ - 13
54
+ 9:
55
+ 1: transformers_trainer
56
+ 13: linux-x86_64
57
+ m:
58
+ - 1: train/global_step
59
+ 6:
60
+ - 3
61
+ - 1: train/loss
62
+ 5: 1
63
+ 6:
64
+ - 1
65
+ - 1: train/grad_norm
66
+ 5: 1
67
+ 6:
68
+ - 1
69
+ - 1: train/learning_rate
70
+ 5: 1
71
+ 6:
72
+ - 1
73
+ - 1: train/epoch
74
+ 5: 1
75
+ 6:
76
+ - 1
77
+ ignore_index:
78
+ desc: null
79
+ value: -100
80
+ image_token_index:
81
+ desc: null
82
+ value: 32000
83
+ projector_hidden_act:
84
+ desc: null
85
+ value: gelu
86
+ vision_feature_select_strategy:
87
+ desc: null
88
+ value: default
89
+ vision_feature_layer:
90
+ desc: null
91
+ value: -2
92
+ vision_config:
93
+ desc: null
94
+ value:
95
+ return_dict: true
96
+ output_hidden_states: false
97
+ output_attentions: false
98
+ torchscript: false
99
+ torch_dtype: null
100
+ use_bfloat16: false
101
+ tf_legacy_loss: false
102
+ pruned_heads: {}
103
+ tie_word_embeddings: true
104
+ chunk_size_feed_forward: 0
105
+ is_encoder_decoder: false
106
+ is_decoder: false
107
+ cross_attention_hidden_size: null
108
+ add_cross_attention: false
109
+ tie_encoder_decoder: false
110
+ max_length: 20
111
+ min_length: 0
112
+ do_sample: false
113
+ early_stopping: false
114
+ num_beams: 1
115
+ num_beam_groups: 1
116
+ diversity_penalty: 0.0
117
+ temperature: 1.0
118
+ top_k: 50
119
+ top_p: 1.0
120
+ typical_p: 1.0
121
+ repetition_penalty: 1.0
122
+ length_penalty: 1.0
123
+ no_repeat_ngram_size: 0
124
+ encoder_no_repeat_ngram_size: 0
125
+ bad_words_ids: null
126
+ num_return_sequences: 1
127
+ output_scores: false
128
+ return_dict_in_generate: false
129
+ forced_bos_token_id: null
130
+ forced_eos_token_id: null
131
+ remove_invalid_values: false
132
+ exponential_decay_length_penalty: null
133
+ suppress_tokens: null
134
+ begin_suppress_tokens: null
135
+ architectures: null
136
+ finetuning_task: null
137
+ id2label:
138
+ '0': LABEL_0
139
+ '1': LABEL_1
140
+ label2id:
141
+ LABEL_0: 0
142
+ LABEL_1: 1
143
+ tokenizer_class: null
144
+ prefix: null
145
+ bos_token_id: null
146
+ pad_token_id: null
147
+ eos_token_id: null
148
+ sep_token_id: null
149
+ decoder_start_token_id: null
150
+ task_specific_params: null
151
+ problem_type: null
152
+ _name_or_path: ''
153
+ model_type: clip_vision_model
154
+ vocab_size: 32000
155
+ hidden_size: 1024
156
+ intermediate_size: 4096
157
+ projection_dim: 768
158
+ num_hidden_layers: 24
159
+ num_attention_heads: 16
160
+ num_channels: 3
161
+ patch_size: 14
162
+ image_size: 336
163
+ initializer_range: 0.02
164
+ initializer_factor: 1.0
165
+ attention_dropout: 0.0
166
+ layer_norm_eps: 1.0e-05
167
+ hidden_act: quick_gelu
168
+ text_config:
169
+ desc: null
170
+ value:
171
+ vocab_size: 32064
172
+ max_position_embeddings: 4096
173
+ hidden_size: 4096
174
+ intermediate_size: 11008
175
+ num_hidden_layers: 32
176
+ num_attention_heads: 32
177
+ num_key_value_heads: 32
178
+ hidden_act: silu
179
+ initializer_range: 0.02
180
+ rms_norm_eps: 1.0e-05
181
+ pretraining_tp: 1
182
+ use_cache: true
183
+ rope_theta: 10000.0
184
+ rope_scaling: null
185
+ attention_bias: false
186
+ attention_dropout: 0.0
187
+ return_dict: true
188
+ output_hidden_states: false
189
+ output_attentions: false
190
+ torchscript: false
191
+ torch_dtype: float16
192
+ use_bfloat16: false
193
+ tf_legacy_loss: false
194
+ pruned_heads: {}
195
+ tie_word_embeddings: false
196
+ chunk_size_feed_forward: 0
197
+ is_encoder_decoder: false
198
+ is_decoder: false
199
+ cross_attention_hidden_size: null
200
+ add_cross_attention: false
201
+ tie_encoder_decoder: false
202
+ max_length: 20
203
+ min_length: 0
204
+ do_sample: false
205
+ early_stopping: false
206
+ num_beams: 1
207
+ num_beam_groups: 1
208
+ diversity_penalty: 0.0
209
+ temperature: 1.0
210
+ top_k: 50
211
+ top_p: 1.0
212
+ typical_p: 1.0
213
+ repetition_penalty: 1.0
214
+ length_penalty: 1.0
215
+ no_repeat_ngram_size: 0
216
+ encoder_no_repeat_ngram_size: 0
217
+ bad_words_ids: null
218
+ num_return_sequences: 1
219
+ output_scores: false
220
+ return_dict_in_generate: false
221
+ forced_bos_token_id: null
222
+ forced_eos_token_id: null
223
+ remove_invalid_values: false
224
+ exponential_decay_length_penalty: null
225
+ suppress_tokens: null
226
+ begin_suppress_tokens: null
227
+ architectures:
228
+ - LlamaForCausalLM
229
+ finetuning_task: null
230
+ id2label:
231
+ '0': LABEL_0
232
+ '1': LABEL_1
233
+ label2id:
234
+ LABEL_0: 0
235
+ LABEL_1: 1
236
+ tokenizer_class: null
237
+ prefix: null
238
+ bos_token_id: 1
239
+ pad_token_id: null
240
+ eos_token_id: 2
241
+ sep_token_id: null
242
+ decoder_start_token_id: null
243
+ task_specific_params: null
244
+ problem_type: null
245
+ _name_or_path: lmsys/vicuna-7b-v1.5
246
+ model_type: llama
247
+ return_dict:
248
+ desc: null
249
+ value: true
250
+ output_hidden_states:
251
+ desc: null
252
+ value: false
253
+ output_attentions:
254
+ desc: null
255
+ value: false
256
+ torchscript:
257
+ desc: null
258
+ value: false
259
+ torch_dtype:
260
+ desc: null
261
+ value: bfloat16
262
+ use_bfloat16:
263
+ desc: null
264
+ value: false
265
+ tf_legacy_loss:
266
+ desc: null
267
+ value: false
268
+ pruned_heads:
269
+ desc: null
270
+ value: {}
271
+ tie_word_embeddings:
272
+ desc: null
273
+ value: false
274
+ chunk_size_feed_forward:
275
+ desc: null
276
+ value: 0
277
+ is_encoder_decoder:
278
+ desc: null
279
+ value: false
280
+ is_decoder:
281
+ desc: null
282
+ value: false
283
+ cross_attention_hidden_size:
284
+ desc: null
285
+ value: null
286
+ add_cross_attention:
287
+ desc: null
288
+ value: false
289
+ tie_encoder_decoder:
290
+ desc: null
291
+ value: false
292
+ max_length:
293
+ desc: null
294
+ value: 20
295
+ min_length:
296
+ desc: null
297
+ value: 0
298
+ do_sample:
299
+ desc: null
300
+ value: false
301
+ early_stopping:
302
+ desc: null
303
+ value: false
304
+ num_beams:
305
+ desc: null
306
+ value: 1
307
+ num_beam_groups:
308
+ desc: null
309
+ value: 1
310
+ diversity_penalty:
311
+ desc: null
312
+ value: 0.0
313
+ temperature:
314
+ desc: null
315
+ value: 1.0
316
+ top_k:
317
+ desc: null
318
+ value: 50
319
+ top_p:
320
+ desc: null
321
+ value: 1.0
322
+ typical_p:
323
+ desc: null
324
+ value: 1.0
325
+ repetition_penalty:
326
+ desc: null
327
+ value: 1.0
328
+ length_penalty:
329
+ desc: null
330
+ value: 1.0
331
+ no_repeat_ngram_size:
332
+ desc: null
333
+ value: 0
334
+ encoder_no_repeat_ngram_size:
335
+ desc: null
336
+ value: 0
337
+ bad_words_ids:
338
+ desc: null
339
+ value: null
340
+ num_return_sequences:
341
+ desc: null
342
+ value: 1
343
+ output_scores:
344
+ desc: null
345
+ value: false
346
+ return_dict_in_generate:
347
+ desc: null
348
+ value: false
349
+ forced_bos_token_id:
350
+ desc: null
351
+ value: null
352
+ forced_eos_token_id:
353
+ desc: null
354
+ value: null
355
+ remove_invalid_values:
356
+ desc: null
357
+ value: false
358
+ exponential_decay_length_penalty:
359
+ desc: null
360
+ value: null
361
+ suppress_tokens:
362
+ desc: null
363
+ value: null
364
+ begin_suppress_tokens:
365
+ desc: null
366
+ value: null
367
+ architectures:
368
+ desc: null
369
+ value:
370
+ - LlavaForConditionalGeneration
371
+ finetuning_task:
372
+ desc: null
373
+ value: null
374
+ id2label:
375
+ desc: null
376
+ value:
377
+ '0': LABEL_0
378
+ '1': LABEL_1
379
+ label2id:
380
+ desc: null
381
+ value:
382
+ LABEL_0: 0
383
+ LABEL_1: 1
384
+ tokenizer_class:
385
+ desc: null
386
+ value: null
387
+ prefix:
388
+ desc: null
389
+ value: null
390
+ bos_token_id:
391
+ desc: null
392
+ value: null
393
+ pad_token_id:
394
+ desc: null
395
+ value: 32001
396
+ eos_token_id:
397
+ desc: null
398
+ value: null
399
+ sep_token_id:
400
+ desc: null
401
+ value: null
402
+ decoder_start_token_id:
403
+ desc: null
404
+ value: null
405
+ task_specific_params:
406
+ desc: null
407
+ value: null
408
+ problem_type:
409
+ desc: null
410
+ value: null
411
+ _name_or_path:
412
+ desc: null
413
+ value: llava-hf/llava-1.5-7b-hf
414
+ transformers_version:
415
+ desc: null
416
+ value: 4.39.3
417
+ model_type:
418
+ desc: null
419
+ value: llava
420
+ quantization_config:
421
+ desc: null
422
+ value:
423
+ quant_method: QuantizationMethod.BITS_AND_BYTES
424
+ _load_in_8bit: false
425
+ _load_in_4bit: true
426
+ llm_int8_threshold: 6.0
427
+ llm_int8_skip_modules: null
428
+ llm_int8_enable_fp32_cpu_offload: false
429
+ llm_int8_has_fp16_weight: false
430
+ bnb_4bit_quant_type: nf4
431
+ bnb_4bit_use_double_quant: true
432
+ bnb_4bit_compute_dtype: bfloat16
433
+ bnb_4bit_quant_storage: uint8
434
+ load_in_4bit: true
435
+ load_in_8bit: false
436
+ output_dir:
437
+ desc: null
438
+ value: ./
439
+ overwrite_output_dir:
440
+ desc: null
441
+ value: false
442
+ do_train:
443
+ desc: null
444
+ value: false
445
+ do_eval:
446
+ desc: null
447
+ value: false
448
+ do_predict:
449
+ desc: null
450
+ value: false
451
+ evaluation_strategy:
452
+ desc: null
453
+ value: 'no'
454
+ prediction_loss_only:
455
+ desc: null
456
+ value: false
457
+ per_device_train_batch_size:
458
+ desc: null
459
+ value: 2
460
+ per_device_eval_batch_size:
461
+ desc: null
462
+ value: 8
463
+ per_gpu_train_batch_size:
464
+ desc: null
465
+ value: null
466
+ per_gpu_eval_batch_size:
467
+ desc: null
468
+ value: null
469
+ gradient_accumulation_steps:
470
+ desc: null
471
+ value: 4
472
+ eval_accumulation_steps:
473
+ desc: null
474
+ value: null
475
+ eval_delay:
476
+ desc: null
477
+ value: 0
478
+ learning_rate:
479
+ desc: null
480
+ value: 0.0002
481
+ weight_decay:
482
+ desc: null
483
+ value: 0.0
484
+ adam_beta1:
485
+ desc: null
486
+ value: 0.9
487
+ adam_beta2:
488
+ desc: null
489
+ value: 0.999
490
+ adam_epsilon:
491
+ desc: null
492
+ value: 1.0e-08
493
+ max_grad_norm:
494
+ desc: null
495
+ value: 1.0
496
+ num_train_epochs:
497
+ desc: null
498
+ value: 1
499
+ max_steps:
500
+ desc: null
501
+ value: -1
502
+ lr_scheduler_type:
503
+ desc: null
504
+ value: constant
505
+ lr_scheduler_kwargs:
506
+ desc: null
507
+ value: {}
508
+ warmup_ratio:
509
+ desc: null
510
+ value: 0.0
511
+ warmup_steps:
512
+ desc: null
513
+ value: 0
514
+ log_level:
515
+ desc: null
516
+ value: passive
517
+ log_level_replica:
518
+ desc: null
519
+ value: warning
520
+ log_on_each_node:
521
+ desc: null
522
+ value: true
523
+ logging_dir:
524
+ desc: null
525
+ value: ./runs/May16_07-25-42_3064694ee8b3
526
+ logging_strategy:
527
+ desc: null
528
+ value: steps
529
+ logging_first_step:
530
+ desc: null
531
+ value: false
532
+ logging_steps:
533
+ desc: null
534
+ value: 1
535
+ logging_nan_inf_filter:
536
+ desc: null
537
+ value: true
538
+ save_strategy:
539
+ desc: null
540
+ value: epoch
541
+ save_steps:
542
+ desc: null
543
+ value: 500
544
+ save_total_limit:
545
+ desc: null
546
+ value: 3
547
+ save_safetensors:
548
+ desc: null
549
+ value: true
550
+ save_on_each_node:
551
+ desc: null
552
+ value: false
553
+ save_only_model:
554
+ desc: null
555
+ value: false
556
+ no_cuda:
557
+ desc: null
558
+ value: false
559
+ use_cpu:
560
+ desc: null
561
+ value: false
562
+ use_mps_device:
563
+ desc: null
564
+ value: false
565
+ seed:
566
+ desc: null
567
+ value: 42
568
+ data_seed:
569
+ desc: null
570
+ value: null
571
+ jit_mode_eval:
572
+ desc: null
573
+ value: false
574
+ use_ipex:
575
+ desc: null
576
+ value: false
577
+ bf16:
578
+ desc: null
579
+ value: false
580
+ fp16:
581
+ desc: null
582
+ value: true
583
+ fp16_opt_level:
584
+ desc: null
585
+ value: O1
586
+ half_precision_backend:
587
+ desc: null
588
+ value: auto
589
+ bf16_full_eval:
590
+ desc: null
591
+ value: false
592
+ fp16_full_eval:
593
+ desc: null
594
+ value: false
595
+ tf32:
596
+ desc: null
597
+ value: null
598
+ local_rank:
599
+ desc: null
600
+ value: 0
601
+ ddp_backend:
602
+ desc: null
603
+ value: null
604
+ tpu_num_cores:
605
+ desc: null
606
+ value: null
607
+ tpu_metrics_debug:
608
+ desc: null
609
+ value: false
610
+ debug:
611
+ desc: null
612
+ value: []
613
+ dataloader_drop_last:
614
+ desc: null
615
+ value: false
616
+ eval_steps:
617
+ desc: null
618
+ value: null
619
+ dataloader_num_workers:
620
+ desc: null
621
+ value: 0
622
+ dataloader_prefetch_factor:
623
+ desc: null
624
+ value: null
625
+ past_index:
626
+ desc: null
627
+ value: -1
628
+ run_name:
629
+ desc: null
630
+ value: ./
631
+ disable_tqdm:
632
+ desc: null
633
+ value: false
634
+ remove_unused_columns:
635
+ desc: null
636
+ value: false
637
+ label_names:
638
+ desc: null
639
+ value: null
640
+ load_best_model_at_end:
641
+ desc: null
642
+ value: false
643
+ metric_for_best_model:
644
+ desc: null
645
+ value: null
646
+ greater_is_better:
647
+ desc: null
648
+ value: null
649
+ ignore_data_skip:
650
+ desc: null
651
+ value: false
652
+ fsdp:
653
+ desc: null
654
+ value: []
655
+ fsdp_min_num_params:
656
+ desc: null
657
+ value: 0
658
+ fsdp_config:
659
+ desc: null
660
+ value:
661
+ min_num_params: 0
662
+ xla: false
663
+ xla_fsdp_v2: false
664
+ xla_fsdp_grad_ckpt: false
665
+ fsdp_transformer_layer_cls_to_wrap:
666
+ desc: null
667
+ value: null
668
+ accelerator_config:
669
+ desc: null
670
+ value:
671
+ split_batches: false
672
+ dispatch_batches: null
673
+ even_batches: true
674
+ use_seedable_sampler: true
675
+ deepspeed:
676
+ desc: null
677
+ value: null
678
+ label_smoothing_factor:
679
+ desc: null
680
+ value: 0.0
681
+ optim:
682
+ desc: null
683
+ value: adamw_bnb_8bit
684
+ optim_args:
685
+ desc: null
686
+ value: null
687
+ adafactor:
688
+ desc: null
689
+ value: false
690
+ group_by_length:
691
+ desc: null
692
+ value: false
693
+ length_column_name:
694
+ desc: null
695
+ value: length
696
+ report_to:
697
+ desc: null
698
+ value:
699
+ - tensorboard
700
+ - wandb
701
+ ddp_find_unused_parameters:
702
+ desc: null
703
+ value: null
704
+ ddp_bucket_cap_mb:
705
+ desc: null
706
+ value: null
707
+ ddp_broadcast_buffers:
708
+ desc: null
709
+ value: null
710
+ dataloader_pin_memory:
711
+ desc: null
712
+ value: true
713
+ dataloader_persistent_workers:
714
+ desc: null
715
+ value: false
716
+ skip_memory_metrics:
717
+ desc: null
718
+ value: true
719
+ use_legacy_prediction_loop:
720
+ desc: null
721
+ value: false
722
+ push_to_hub:
723
+ desc: null
724
+ value: false
725
+ resume_from_checkpoint:
726
+ desc: null
727
+ value: null
728
+ hub_model_id:
729
+ desc: null
730
+ value: null
731
+ hub_strategy:
732
+ desc: null
733
+ value: every_save
734
+ hub_token:
735
+ desc: null
736
+ value: <HUB_TOKEN>
737
+ hub_private_repo:
738
+ desc: null
739
+ value: false
740
+ hub_always_push:
741
+ desc: null
742
+ value: false
743
+ gradient_checkpointing:
744
+ desc: null
745
+ value: false
746
+ gradient_checkpointing_kwargs:
747
+ desc: null
748
+ value: null
749
+ include_inputs_for_metrics:
750
+ desc: null
751
+ value: false
752
+ fp16_backend:
753
+ desc: null
754
+ value: auto
755
+ push_to_hub_model_id:
756
+ desc: null
757
+ value: null
758
+ push_to_hub_organization:
759
+ desc: null
760
+ value: null
761
+ push_to_hub_token:
762
+ desc: null
763
+ value: <PUSH_TO_HUB_TOKEN>
764
+ mp_parameters:
765
+ desc: null
766
+ value: ''
767
+ auto_find_batch_size:
768
+ desc: null
769
+ value: false
770
+ full_determinism:
771
+ desc: null
772
+ value: false
773
+ torchdynamo:
774
+ desc: null
775
+ value: null
776
+ ray_scope:
777
+ desc: null
778
+ value: last
779
+ ddp_timeout:
780
+ desc: null
781
+ value: 1800
782
+ torch_compile:
783
+ desc: null
784
+ value: false
785
+ torch_compile_backend:
786
+ desc: null
787
+ value: null
788
+ torch_compile_mode:
789
+ desc: null
790
+ value: null
791
+ dispatch_batches:
792
+ desc: null
793
+ value: null
794
+ split_batches:
795
+ desc: null
796
+ value: null
797
+ include_tokens_per_second:
798
+ desc: null
799
+ value: false
800
+ include_num_input_tokens_seen:
801
+ desc: null
802
+ value: false
803
+ neftune_noise_alpha:
804
+ desc: null
805
+ value: null
806
+ optim_target_modules:
807
+ desc: null
808
+ value: null
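The quantization_config captured in this run corresponds to a bitsandbytes 4-bit NF4 setup on top of llava-hf/llava-1.5-7b-hf. A minimal sketch of loading the base model with the same settings, under the assumption that this mirrors how the training script prepared the model:

    # Sketch: reproduce the 4-bit quantization settings recorded in config.yaml.
    import torch
    from transformers import BitsAndBytesConfig, LlavaForConditionalGeneration

    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
    )

    model = LlavaForConditionalGeneration.from_pretrained(
        "llava-hf/llava-1.5-7b-hf",      # base checkpoint named in the logged config
        quantization_config=bnb_config,
    )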
wandb/run-20240516_072542-5ru1r69k/files/output.log ADDED
@@ -0,0 +1,12 @@
+ /opt/conda/lib/python3.10/site-packages/torch/utils/checkpoint.py:429: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.
+ warnings.warn(
+ /opt/conda/lib/python3.10/site-packages/torch/utils/checkpoint.py:61: UserWarning: None of the inputs have requires_grad=True. Gradients will be None
+ warnings.warn(
+ `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.
+ /opt/conda/lib/python3.10/site-packages/transformers/models/llava/configuration_llava.py:144: FutureWarning: The `vocab_size` attribute is deprecated and will be removed in v4.42, Please use `text_config.vocab_size` instead.
+ warnings.warn(
+ /opt/conda/lib/python3.10/site-packages/transformers/models/llava/configuration_llava.py:104: FutureWarning: The `vocab_size` argument is deprecated and will be removed in v4.42, since it can be inferred from the `text_config`. Passing this argument has no effect
+ warnings.warn(
+ /opt/conda/lib/python3.10/site-packages/transformers/models/llava/configuration_llava.py:144: FutureWarning: The `vocab_size` attribute is deprecated and will be removed in v4.42, Please use `text_config.vocab_size` instead.
+ warnings.warn(
+ /opt/conda/lib/python3.10/site-packages/transformers/models/llava/configuration_llava.py:104: FutureWarning: The `vocab_size` argument is deprecated and will be removed in v4.42, since it can be inferred from the `text_config`. Passing this argument has no effect
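The first warning above can usually be silenced by passing use_reentrant explicitly when gradient checkpointing is enabled on the model; a sketch, assuming checkpointing was turned on via the model object rather than through TrainingArguments (the logged args show gradient_checkpointing: false):

    # Sketch: pass use_reentrant explicitly to avoid the torch.utils.checkpoint warning.
    model.gradient_checkpointing_enable(
        gradient_checkpointing_kwargs={"use_reentrant": False}
    )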
wandb/run-20240516_072542-5ru1r69k/files/requirements.txt ADDED
@@ -0,0 +1,865 @@
1
+ Babel==2.14.0
2
+ Boruta==0.3
3
+ Brotli==1.0.9
4
+ CVXcanon==0.1.2
5
+ Cartopy==0.23.0
6
+ Cython==3.0.8
7
+ Deprecated==1.2.14
8
+ Farama-Notifications==0.0.4
9
+ Flask==3.0.3
10
+ Geohash==1.0
11
+ GitPython==3.1.41
12
+ ImageHash==4.3.1
13
+ Janome==0.5.0
14
+ Jinja2==3.1.2
15
+ LunarCalendar==0.0.9
16
+ Mako==1.3.3
17
+ Markdown==3.5.2
18
+ MarkupSafe==2.1.3
19
+ MarkupSafe==2.1.5
20
+ Pillow==9.5.0
21
+ PuLP==2.8.0
22
+ PyArabic==0.6.15
23
+ PyJWT==2.8.0
24
+ PyMeeus==0.5.12
25
+ PySocks==1.7.1
26
+ PyUpSet==0.1.1.post7
27
+ PyWavelets==1.5.0
28
+ PyYAML==6.0.1
29
+ Pygments==2.17.2
30
+ Pympler==1.0.1
31
+ QtPy==2.4.1
32
+ Rtree==1.2.0
33
+ SQLAlchemy==2.0.25
34
+ SecretStorage==3.3.3
35
+ Send2Trash==1.8.2
36
+ Shapely==1.8.5.post1
37
+ Shimmy==1.3.0
38
+ SimpleITK==2.3.1
39
+ TPOT==0.12.1
40
+ Theano-PyMC==1.1.2
41
+ Theano==1.0.5
42
+ Wand==0.6.13
43
+ Werkzeug==3.0.2
44
+ absl-py==1.4.0
45
+ accelerate==0.29.3
46
+ access==1.1.9
47
+ affine==2.4.0
48
+ aiobotocore==2.12.3
49
+ aiofiles==22.1.0
50
+ aiohttp-cors==0.7.0
51
+ aiohttp==3.9.1
52
+ aioitertools==0.11.0
53
+ aiorwlock==1.3.0
54
+ aiosignal==1.3.1
55
+ aiosqlite==0.19.0
56
+ albumentations==1.4.0
57
+ alembic==1.13.1
58
+ altair==5.3.0
59
+ annotated-types==0.6.0
60
+ annoy==1.17.3
61
+ anyio==4.2.0
62
+ apache-beam==2.46.0
63
+ aplus==0.11.0
64
+ appdirs==1.4.4
65
+ archspec==0.2.3
66
+ argon2-cffi-bindings==21.2.0
67
+ argon2-cffi==23.1.0
68
+ array-record==0.5.0
69
+ arrow==1.3.0
70
+ arviz==0.18.0
71
+ astroid==3.1.0
72
+ astropy-iers-data==0.2024.4.15.2.45.49
73
+ astropy==6.0.1
74
+ asttokens==2.4.1
75
+ astunparse==1.6.3
76
+ async-lru==2.0.4
77
+ async-timeout==4.0.3
78
+ attrs==23.2.0
79
+ audioread==3.0.1
80
+ autopep8==2.0.4
81
+ backoff==2.2.1
82
+ bayesian-optimization==1.4.3
83
+ beatrix_jupyterlab==2023.128.151533
84
+ beautifulsoup4==4.12.2
85
+ bitsandbytes==0.43.1
86
+ blake3==0.2.1
87
+ bleach==6.1.0
88
+ blessed==1.20.0
89
+ blinker==1.7.0
90
+ blis==0.7.10
91
+ blosc2==2.6.2
92
+ bokeh==3.4.1
93
+ boltons==23.1.1
94
+ boto3==1.26.100
95
+ botocore==1.34.69
96
+ bq_helper==0.4.1
97
+ bqplot==0.12.43
98
+ branca==0.7.1
99
+ brewer2mpl==1.4.1
100
+ brotlipy==0.7.0
101
+ cached-property==1.5.2
102
+ cachetools==4.2.4
103
+ cachetools==5.3.2
104
+ catalogue==2.0.10
105
+ catalyst==22.4
106
+ catboost==1.2.3
107
+ category-encoders==2.6.3
108
+ certifi==2024.2.2
109
+ cesium==0.12.1
110
+ cffi==1.16.0
111
+ charset-normalizer==3.3.2
112
+ chex==0.1.86
113
+ cleverhans==4.0.0
114
+ click-plugins==1.1.1
115
+ click==8.1.7
116
+ cligj==0.7.2
117
+ cloud-tpu-client==0.10
118
+ cloud-tpu-profiler==2.4.0
119
+ cloudpathlib==0.16.0
120
+ cloudpickle==2.2.1
121
+ cloudpickle==3.0.0
122
+ cmdstanpy==1.2.2
123
+ colorama==0.4.6
124
+ colorcet==3.1.0
125
+ colorful==0.5.6
126
+ colorlog==6.8.2
127
+ colorlover==0.3.0
128
+ comm==0.2.1
129
+ conda-libmamba-solver==23.7.0
130
+ conda-package-handling==2.2.0
131
+ conda==23.7.4
132
+ conda_package_streaming==0.9.0
133
+ confection==0.1.4
134
+ contextily==1.6.0
135
+ contourpy==1.2.0
136
+ contourpy==1.2.1
137
+ convertdate==2.4.0
138
+ crcmod==1.7
139
+ cryptography==41.0.7
140
+ cuda-python==12.4.0
141
+ cudf==23.8.0
142
+ cufflinks==0.17.3
143
+ cuml==23.8.0
144
+ cupy==13.0.0
145
+ cycler==0.12.1
146
+ cymem==2.0.8
147
+ cytoolz==0.12.3
148
+ daal4py==2024.3.0
149
+ daal==2024.3.0
150
+ dacite==1.8.1
151
+ dask-cuda==23.8.0
152
+ dask-cudf==23.8.0
153
+ dask-expr==1.0.11
154
+ dask==2024.4.1
155
+ dataclasses-json==0.6.4
156
+ dataproc_jupyter_plugin==0.1.66
157
+ datasets==2.18.0
158
+ datashader==0.16.0
159
+ datatile==1.0.3
160
+ db-dtypes==1.2.0
161
+ deap==1.4.1
162
+ debugpy==1.8.0
163
+ decorator==5.1.1
164
+ deepdiff==7.0.1
165
+ defusedxml==0.7.1
166
+ deprecation==2.1.0
167
+ descartes==1.1.0
168
+ dill==0.3.8
169
+ dipy==1.9.0
170
+ distlib==0.3.8
171
+ distributed==2023.7.1
172
+ distro==1.9.0
173
+ dm-tree==0.1.8
174
+ docker-pycreds==0.4.0
175
+ docker==7.0.0
176
+ docopt==0.6.2
177
+ docstring-parser==0.15
178
+ docstring-to-markdown==0.15
179
+ docutils==0.21.1
180
+ earthengine-api==0.1.399
181
+ easydict==1.13
182
+ easyocr==1.7.1
183
+ ecos==2.0.13
184
+ eli5==0.13.0
185
+ emoji==2.11.0
186
+ en-core-web-lg==3.7.1
187
+ en-core-web-sm==3.7.1
188
+ entrypoints==0.4
189
+ ephem==4.1.5
190
+ esda==2.5.1
191
+ essentia==2.1b6.dev1110
192
+ et-xmlfile==1.1.0
193
+ etils==1.6.0
194
+ exceptiongroup==1.2.0
195
+ executing==2.0.1
196
+ explainable-ai-sdk==1.3.3
197
+ fastai==2.7.14
198
+ fastapi==0.108.0
199
+ fastavro==1.9.3
200
+ fastcore==1.5.29
201
+ fastdownload==0.0.7
202
+ fasteners==0.19
203
+ fastjsonschema==2.19.1
204
+ fastprogress==1.0.3
205
+ fastrlock==0.8.2
206
+ fasttext==0.9.2
207
+ feather-format==0.4.1
208
+ featuretools==1.30.0
209
+ filelock==3.13.1
210
+ fiona==1.9.6
211
+ fitter==1.7.0
212
+ flake8==7.0.0
213
+ flashtext==2.7
214
+ flatbuffers==23.5.26
215
+ flax==0.8.2
216
+ folium==0.16.0
217
+ fonttools==4.47.0
218
+ fonttools==4.51.0
219
+ fqdn==1.5.1
220
+ frozendict==2.4.2
221
+ frozenlist==1.4.1
222
+ fsspec==2024.2.0
223
+ fsspec==2024.3.1
224
+ funcy==2.0
225
+ fury==0.10.0
226
+ future==1.0.0
227
+ fuzzywuzzy==0.18.0
228
+ gast==0.5.4
229
+ gatspy==0.3
230
+ gcsfs==2024.2.0
231
+ gensim==4.3.2
232
+ geographiclib==2.0
233
+ geojson==3.1.0
234
+ geopandas==0.14.3
235
+ geoplot==0.5.1
236
+ geopy==2.4.1
237
+ geoviews==1.12.0
238
+ ggplot==0.11.5
239
+ giddy==2.3.5
240
+ gitdb==4.0.11
241
+ google-ai-generativelanguage==0.6.2
242
+ google-api-core==2.11.1
243
+ google-api-core==2.18.0
244
+ google-api-python-client==2.126.0
245
+ google-apitools==0.5.31
246
+ google-auth-httplib2==0.2.0
247
+ google-auth-oauthlib==1.2.0
248
+ google-auth==2.26.1
249
+ google-cloud-aiplatform==0.6.0a1
250
+ google-cloud-artifact-registry==1.10.0
251
+ google-cloud-automl==1.0.1
252
+ google-cloud-bigquery==2.34.4
253
+ google-cloud-bigtable==1.7.3
254
+ google-cloud-core==2.4.1
255
+ google-cloud-datastore==2.19.0
256
+ google-cloud-dlp==3.14.0
257
+ google-cloud-jupyter-config==0.0.5
258
+ google-cloud-language==2.13.3
259
+ google-cloud-monitoring==2.18.0
260
+ google-cloud-pubsub==2.19.0
261
+ google-cloud-pubsublite==1.9.0
262
+ google-cloud-recommendations-ai==0.7.1
263
+ google-cloud-resource-manager==1.11.0
264
+ google-cloud-spanner==3.40.1
265
+ google-cloud-storage==1.44.0
266
+ google-cloud-translate==3.12.1
267
+ google-cloud-videointelligence==2.13.3
268
+ google-cloud-vision==2.8.0
269
+ google-crc32c==1.5.0
270
+ google-generativeai==0.5.1
271
+ google-pasta==0.2.0
272
+ google-resumable-media==2.7.0
273
+ googleapis-common-protos==1.62.0
274
+ gplearn==0.4.2
275
+ gpustat==1.0.0
276
+ gpxpy==1.6.2
277
+ graphviz==0.20.3
278
+ greenlet==3.0.3
279
+ grpc-google-iam-v1==0.12.7
280
+ grpcio-status==1.48.1
281
+ grpcio-status==1.48.2
282
+ grpcio==1.51.1
283
+ grpcio==1.60.0
284
+ gviz-api==1.10.0
285
+ gym-notices==0.0.8
286
+ gym==0.26.2
287
+ gymnasium==0.29.0
288
+ h11==0.14.0
289
+ h2o==3.46.0.1
290
+ h5netcdf==1.3.0
291
+ h5py==3.10.0
292
+ haversine==2.8.1
293
+ hdfs==2.7.3
294
+ hep-ml==0.7.2
295
+ hijri-converter==2.3.1
296
+ hmmlearn==0.3.2
297
+ holidays==0.24
298
+ holoviews==1.18.3
299
+ hpsklearn==0.1.0
300
+ html5lib==1.1
301
+ htmlmin==0.1.12
302
+ httpcore==1.0.5
303
+ httplib2==0.21.0
304
+ httptools==0.6.1
305
+ httpx==0.27.0
306
+ huggingface-hub==0.22.2
307
+ hunspell==0.5.5
308
+ hydra-slayer==0.5.0
309
+ hyperopt==0.2.7
310
+ hypertools==0.8.0
311
+ idna==3.6
312
+ igraph==0.11.4
313
+ imagecodecs==2024.1.1
314
+ imageio==2.33.1
315
+ imbalanced-learn==0.12.2
316
+ imgaug==0.4.0
317
+ importlib-metadata==6.11.0
318
+ importlib-metadata==7.0.1
319
+ importlib-resources==6.1.1
320
+ inequality==1.0.1
321
+ iniconfig==2.0.0
322
+ ipydatawidgets==4.3.5
323
+ ipykernel==6.28.0
324
+ ipyleaflet==0.18.2
325
+ ipympl==0.7.0
326
+ ipython-genutils==0.2.0
327
+ ipython-genutils==0.2.0
328
+ ipython-sql==0.5.0
329
+ ipython==8.20.0
330
+ ipyvolume==0.6.3
331
+ ipyvue==1.11.0
332
+ ipyvuetify==1.9.4
333
+ ipywebrtc==0.6.0
334
+ ipywidgets==7.7.1
335
+ isoduration==20.11.0
336
+ isort==5.13.2
337
+ isoweek==1.3.3
338
+ itsdangerous==2.2.0
339
+ jaraco.classes==3.3.0
340
+ jax-jumpy==1.0.0
341
+ jax==0.4.23
342
+ jaxlib==0.4.23.dev20240116
343
+ jedi==0.19.1
344
+ jeepney==0.8.0
345
+ jieba==0.42.1
346
+ jmespath==1.0.1
347
+ joblib==1.4.0
348
+ json5==0.9.14
349
+ jsonpatch==1.33
350
+ jsonpointer==2.4
351
+ jsonschema-specifications==2023.12.1
352
+ jsonschema==4.20.0
353
+ jupyter-console==6.6.3
354
+ jupyter-events==0.9.0
355
+ jupyter-http-over-ws==0.0.8
356
+ jupyter-lsp==1.5.1
357
+ jupyter-server-mathjax==0.2.6
358
+ jupyter-ydoc==0.2.5
359
+ jupyter_client==7.4.9
360
+ jupyter_client==8.6.0
361
+ jupyter_core==5.7.1
362
+ jupyter_server==2.12.5
363
+ jupyter_server_fileid==0.9.1
364
+ jupyter_server_proxy==4.1.0
365
+ jupyter_server_terminals==0.5.1
366
+ jupyter_server_ydoc==0.8.0
367
+ jupyterlab-lsp==5.1.0
368
+ jupyterlab-widgets==3.0.9
369
+ jupyterlab==4.1.6
370
+ jupyterlab_git==0.44.0
371
+ jupyterlab_pygments==0.3.0
372
+ jupyterlab_server==2.25.2
373
+ jupytext==1.16.0
374
+ kaggle-environments==1.14.3
375
+ kaggle==1.6.12
376
+ kagglehub==0.2.3
377
+ keras-cv==0.8.2
378
+ keras-nlp==0.9.3
379
+ keras-tuner==1.4.6
380
+ keras==3.2.1
381
+ kernels-mixer==0.0.7
382
+ keyring==24.3.0
383
+ keyrings.google-artifactregistry-auth==1.1.2
384
+ kfp-pipeline-spec==0.2.2
385
+ kfp-server-api==2.0.5
386
+ kfp==2.5.0
387
+ kiwisolver==1.4.5
388
+ kmapper==2.0.1
389
+ kmodes==0.12.2
390
+ korean-lunar-calendar==0.3.1
391
+ kornia==0.7.2
392
+ kornia_rs==0.1.3
393
+ kt-legacy==1.0.5
394
+ kubernetes==26.1.0
395
+ langcodes==3.3.0
396
+ langid==1.1.6
397
+ lazy_loader==0.3
398
+ learntools==0.3.4
399
+ leven==1.0.4
400
+ libclang==16.0.6
401
+ libmambapy==1.5.0
402
+ libpysal==4.9.2
403
+ librosa==0.10.1
404
+ lightgbm==4.2.0
405
+ lightning-utilities==0.11.2
406
+ lime==0.2.0.1
407
+ line-profiler==4.1.2
408
+ linkify-it-py==2.0.3
409
+ llvmlite==0.41.1
410
+ llvmlite==0.42.0
411
+ lml==0.1.0
412
+ locket==1.0.0
413
+ loguru==0.7.2
414
+ lxml==5.2.1
415
+ lz4==4.3.3
416
+ mamba==1.5.0
417
+ mapclassify==2.6.1
418
+ markdown-it-py==3.0.0
419
+ marshmallow==3.21.1
420
+ matplotlib-inline==0.1.6
421
+ matplotlib-venn==0.11.10
422
+ matplotlib==3.7.5
423
+ matplotlib==3.8.4
424
+ mccabe==0.7.0
425
+ mdit-py-plugins==0.4.0
426
+ mdurl==0.1.2
427
+ memory-profiler==0.61.0
428
+ menuinst==2.0.1
429
+ mercantile==1.2.1
430
+ mgwr==2.2.1
431
+ missingno==0.5.2
432
+ mistune==0.8.4
433
+ mizani==0.11.1
434
+ ml-dtypes==0.2.0
435
+ mlcrate==0.2.0
436
+ mlens==0.2.3
437
+ mlxtend==0.23.1
438
+ mne==1.6.1
439
+ mnist==0.2.2
440
+ momepy==0.7.0
441
+ more-itertools==10.2.0
442
+ mpld3==0.5.10
443
+ mpmath==1.3.0
444
+ msgpack==1.0.7
445
+ multidict==6.0.4
446
+ multimethod==1.10
447
+ multipledispatch==1.0.0
448
+ multiprocess==0.70.16
449
+ munkres==1.1.4
450
+ murmurhash==1.0.10
451
+ mypy-extensions==1.0.0
452
+ namex==0.0.8
453
+ nb-conda-kernels==2.3.1
454
+ nb_conda==2.2.1
455
+ nbclassic==1.0.0
456
+ nbclient==0.5.13
457
+ nbconvert==6.4.5
458
+ nbdime==3.2.0
459
+ nbformat==5.9.2
460
+ ndindex==1.8
461
+ nest-asyncio==1.5.8
462
+ networkx==3.2.1
463
+ nibabel==5.2.1
464
+ nilearn==0.10.4
465
+ ninja==1.11.1.1
466
+ nltk==3.2.4
467
+ nose==1.3.7
468
+ notebook==6.5.4
469
+ notebook==6.5.6
470
+ notebook_executor==0.2
471
+ notebook_shim==0.2.3
472
+ numba==0.58.1
473
+ numba==0.59.1
474
+ numexpr==2.10.0
475
+ numpy==1.26.4
476
+ nvidia-ml-py==11.495.46
477
+ nvtx==0.2.10
478
+ oauth2client==4.1.3
479
+ oauthlib==3.2.2
480
+ objsize==0.6.1
481
+ odfpy==1.4.1
482
+ olefile==0.47
483
+ onnx==1.16.0
484
+ opencensus-context==0.1.3
485
+ opencensus==0.11.4
486
+ opencv-contrib-python==4.9.0.80
487
+ opencv-python-headless==4.9.0.80
488
+ opencv-python==4.9.0.80
489
+ openpyxl==3.1.2
490
+ openslide-python==1.3.1
491
+ opentelemetry-api==1.22.0
492
+ opentelemetry-exporter-otlp-proto-common==1.22.0
493
+ opentelemetry-exporter-otlp-proto-grpc==1.22.0
494
+ opentelemetry-exporter-otlp-proto-http==1.22.0
495
+ opentelemetry-exporter-otlp==1.22.0
496
+ opentelemetry-proto==1.22.0
497
+ opentelemetry-sdk==1.22.0
498
+ opentelemetry-semantic-conventions==0.43b0
499
+ opt-einsum==3.3.0
500
+ optax==0.2.2
501
+ optree==0.11.0
502
+ optuna==3.6.1
503
+ orbax-checkpoint==0.5.9
504
+ ordered-set==4.1.0
505
+ orjson==3.9.10
506
+ ortools==9.4.1874
507
+ osmnx==1.9.2
508
+ overrides==7.4.0
509
+ packaging==21.3
510
+ pandas-datareader==0.10.0
511
+ pandas-profiling==3.6.6
512
+ pandas-summary==0.2.0
513
+ pandas==2.1.4
514
+ pandas==2.2.2
515
+ pandasql==0.7.3
516
+ pandocfilters==1.5.0
517
+ panel==1.4.1
518
+ papermill==2.5.0
519
+ param==2.1.0
520
+ parso==0.8.3
521
+ partd==1.4.1
522
+ path.py==12.5.0
523
+ path==16.14.0
524
+ pathos==0.3.2
525
+ pathy==0.10.3
526
+ patsy==0.5.6
527
+ pdf2image==1.17.0
528
+ peft==0.10.0
529
+ pettingzoo==1.24.0
530
+ pexpect==4.8.0
531
+ pexpect==4.9.0
532
+ phik==0.12.4
533
+ pickleshare==0.7.5
534
+ pillow==10.3.0
535
+ pip==23.3.2
536
+ pkgutil_resolve_name==1.3.10
537
+ platformdirs==4.2.0
538
+ plotly-express==0.4.1
539
+ plotly==5.18.0
540
+ plotnine==0.13.4
541
+ pluggy==1.4.0
542
+ pointpats==2.4.0
543
+ polars==0.20.21
544
+ polyglot==16.7.4
545
+ pooch==1.8.1
546
+ pox==0.3.4
547
+ ppca==0.0.4
548
+ ppft==1.7.6.8
549
+ preprocessing==0.1.13
550
+ preshed==3.0.9
551
+ prettytable==3.9.0
552
+ progressbar2==4.4.2
553
+ prometheus-client==0.19.0
554
+ promise==2.3
555
+ prompt-toolkit==3.0.42
556
+ prompt-toolkit==3.0.43
557
+ prophet==1.1.1
558
+ proto-plus==1.23.0
559
+ protobuf==3.20.3
560
+ protobuf==4.21.12
561
+ psutil==5.9.3
562
+ psutil==5.9.7
563
+ ptyprocess==0.7.0
564
+ pudb==2024.1
565
+ pure-eval==0.2.2
566
+ py-cpuinfo==9.0.0
567
+ py-spy==0.3.14
568
+ py4j==0.10.9.7
569
+ pyLDAvis==3.4.1
570
+ pyOpenSSL==23.3.0
571
+ pyaml==23.12.0
572
+ pyarrow-hotfix==0.6
573
+ pyarrow==15.0.2
574
+ pyasn1-modules==0.3.0
575
+ pyasn1==0.5.1
576
+ pybind11==2.12.0
577
+ pyclipper==1.3.0.post5
578
+ pycodestyle==2.11.1
579
+ pycosat==0.6.6
580
+ pycparser==2.21
581
+ pycryptodome==3.20.0
582
+ pyct==0.5.0
583
+ pycuda==2024.1
584
+ pydantic==2.5.3
585
+ pydantic==2.7.0
586
+ pydantic_core==2.14.6
587
+ pydantic_core==2.18.1
588
+ pydegensac==0.1.2
589
+ pydicom==2.4.4
590
+ pydocstyle==6.3.0
591
+ pydot==1.4.2
592
+ pydub==0.25.1
593
+ pyemd==1.0.0
594
+ pyerfa==2.0.1.4
595
+ pyexcel-io==0.6.6
596
+ pyexcel-ods==0.6.0
597
+ pyflakes==3.2.0
598
+ pygltflib==1.16.2
599
+ pykalman==0.9.7
600
+ pylibraft==23.8.0
601
+ pylint==3.1.0
602
+ pymc3==3.11.4
603
+ pymongo==3.13.0
604
+ pynndescent==0.5.12
605
+ pynvml==11.4.1
606
+ pynvrtc==9.2
607
+ pyparsing==3.1.1
608
+ pyparsing==3.1.2
609
+ pypdf==4.2.0
610
+ pyproj==3.6.1
611
+ pysal==24.1
612
+ pyshp==2.3.1
613
+ pytesseract==0.3.10
614
+ pytest==8.1.1
615
+ python-bidi==0.4.2
616
+ python-dateutil==2.9.0.post0
617
+ python-dotenv==1.0.0
618
+ python-json-logger==2.0.7
619
+ python-louvain==0.16
620
+ python-lsp-jsonrpc==1.1.2
621
+ python-lsp-server==1.11.0
622
+ python-slugify==8.0.4
623
+ python-utils==3.8.2
624
+ pythreejs==2.4.2
625
+ pytoolconfig==1.3.1
626
+ pytools==2024.1.1
627
+ pytorch-ignite==0.5.0.post2
628
+ pytorch-lightning==2.2.2
629
+ pytz==2023.3.post1
630
+ pytz==2024.1
631
+ pyu2f==0.1.5
632
+ pyviz_comms==3.0.2
633
+ pyzmq==24.0.1
634
+ pyzmq==25.1.2
635
+ qgrid==1.3.1
636
+ qtconsole==5.5.1
637
+ quantecon==0.7.2
638
+ qudida==0.0.4
639
+ raft-dask==23.8.0
640
+ rasterio==1.3.10
641
+ rasterstats==0.19.0
642
+ ray-cpp==2.9.0
643
+ ray==2.9.0
644
+ referencing==0.32.1
645
+ regex==2023.12.25
646
+ requests-oauthlib==1.3.1
647
+ requests-toolbelt==0.10.1
648
+ requests==2.31.0
649
+ retrying==1.3.3
650
+ retrying==1.3.4
651
+ rfc3339-validator==0.1.4
652
+ rfc3986-validator==0.1.1
653
+ rgf-python==3.12.0
654
+ rich-click==1.7.4
655
+ rich==13.7.0
656
+ rich==13.7.1
657
+ rmm==23.8.0
658
+ rope==1.13.0
659
+ rpds-py==0.16.2
660
+ rsa==4.9
661
+ ruamel-yaml-conda==0.15.100
662
+ ruamel.yaml.clib==0.2.7
663
+ ruamel.yaml==0.17.40
664
+ s2sphere==0.2.5
665
+ s3fs==2024.2.0
666
+ s3transfer==0.6.2
667
+ safetensors==0.4.3
668
+ scattertext==0.1.19
669
+ scikit-image==0.22.0
670
+ scikit-learn-intelex==2024.3.0
671
+ scikit-learn==1.2.2
672
+ scikit-multilearn==0.2.0
673
+ scikit-optimize==0.10.1
674
+ scikit-plot==0.3.7
675
+ scikit-surprise==1.1.3
676
+ scipy==1.11.4
677
+ scipy==1.13.0
678
+ seaborn==0.12.2
679
+ segment_anything==1.0
680
+ segregation==2.5
681
+ semver==3.0.2
682
+ sentencepiece==0.2.0
683
+ sentry-sdk==1.45.0
684
+ setproctitle==1.3.3
685
+ setuptools-git==1.2
686
+ setuptools-scm==8.0.4
687
+ setuptools==69.0.3
688
+ shap==0.44.1
689
+ shapely==2.0.4
690
+ shellingham==1.5.4
691
+ shtab==1.7.1
692
+ simpervisor==1.0.0
693
+ simplejson==3.19.2
694
+ six==1.16.0
695
+ sklearn-pandas==2.2.0
696
+ slicer==0.0.7
697
+ smart-open==6.4.0
698
+ smmap==5.0.1
699
+ sniffio==1.3.0
700
+ snowballstemmer==2.2.0
701
+ snuggs==1.4.7
702
+ sortedcontainers==2.4.0
703
+ soundfile==0.12.1
704
+ soupsieve==2.5
705
+ soxr==0.3.7
706
+ spacy-legacy==3.0.12
707
+ spacy-loggers==1.0.5
708
+ spacy==3.7.3
709
+ spaghetti==1.7.5.post1
710
+ spectral==0.23.1
711
+ spglm==1.1.0
712
+ sphinx-rtd-theme==0.2.4
713
+ spint==1.0.7
714
+ splot==1.1.5.post1
715
+ spopt==0.6.0
716
+ spreg==1.4.2
717
+ spvcm==0.3.0
718
+ sqlparse==0.4.4
719
+ squarify==0.4.3
720
+ srsly==2.4.8
721
+ stable-baselines3==2.1.0
722
+ stack-data==0.6.2
723
+ stack-data==0.6.3
724
+ stanio==0.5.0
725
+ starlette==0.32.0.post1
726
+ statsmodels==0.14.1
727
+ stemming==1.0.1
728
+ stop-words==2018.7.23
729
+ stopit==1.1.2
730
+ stumpy==1.12.0
731
+ sympy==1.12
732
+ tables==3.9.2
733
+ tabulate==0.9.0
734
+ tangled-up-in-unicode==0.2.0
735
+ tbb==2021.12.0
736
+ tblib==3.0.0
737
+ tenacity==8.2.3
738
+ tensorboard-data-server==0.7.2
739
+ tensorboard-plugin-profile==2.15.0
740
+ tensorboard==2.15.1
741
+ tensorboardX==2.6.2.2
742
+ tensorflow-cloud==0.1.16
743
+ tensorflow-datasets==4.9.4
744
+ tensorflow-decision-forests==1.8.1
745
+ tensorflow-estimator==2.15.0
746
+ tensorflow-hub==0.16.1
747
+ tensorflow-io-gcs-filesystem==0.35.0
748
+ tensorflow-io==0.35.0
749
+ tensorflow-metadata==0.14.0
750
+ tensorflow-probability==0.23.0
751
+ tensorflow-serving-api==2.14.1
752
+ tensorflow-text==2.15.0
753
+ tensorflow-transform==0.14.0
754
+ tensorflow==2.15.0
755
+ tensorstore==0.1.56
756
+ termcolor==2.4.0
757
+ terminado==0.18.0
758
+ testpath==0.6.0
759
+ text-unidecode==1.3
760
+ textblob==0.18.0.post0
761
+ texttable==1.7.0
762
+ tf_keras==2.15.1
763
+ tfp-nightly==0.24.0.dev0
764
+ thinc==8.2.2
765
+ threadpoolctl==3.2.0
766
+ tifffile==2023.12.9
767
+ timm==0.9.16
768
+ tinycss2==1.2.1
769
+ tobler==0.11.2
770
+ tokenizers==0.15.2
771
+ toml==0.10.2
772
+ tomli==2.0.1
773
+ tomlkit==0.12.4
774
+ toolz==0.12.1
775
+ torch==2.1.2
776
+ torchaudio==2.1.2
777
+ torchdata==0.7.1
778
+ torchinfo==1.8.0
779
+ torchmetrics==1.3.2
780
+ torchtext==0.16.2
781
+ torchvision==0.16.2
782
+ tornado==6.3.3
783
+ tqdm==4.66.1
784
+ traceml==1.0.8
785
+ traitlets==5.9.0
786
+ traittypes==0.2.1
787
+ transformers==4.39.3
788
+ treelite-runtime==3.2.0
789
+ treelite==3.2.0
790
+ trl==0.8.6
791
+ truststore==0.8.0
792
+ trx-python==0.2.9
793
+ tsfresh==0.20.2
794
+ typeguard==4.1.5
795
+ typer==0.9.0
796
+ typer==0.9.4
797
+ types-python-dateutil==2.8.19.20240106
798
+ typing-inspect==0.9.0
799
+ typing-utils==0.1.0
800
+ typing_extensions==4.9.0
801
+ tyro==0.8.4
802
+ tzdata==2023.4
803
+ uc-micro-py==1.0.3
804
+ ucx-py==0.33.0
805
+ ujson==5.9.0
806
+ umap-learn==0.5.6
807
+ unicodedata2==15.1.0
808
+ update-checker==0.18.0
809
+ uri-template==1.3.0
810
+ uritemplate==3.0.1
811
+ urllib3==1.26.18
812
+ urllib3==2.1.0
813
+ urwid==2.6.10
814
+ urwid_readline==0.14
815
+ uvicorn==0.25.0
816
+ uvloop==0.19.0
817
+ vaex-astro==0.9.3
818
+ vaex-core==4.17.1
819
+ vaex-hdf5==0.14.1
820
+ vaex-jupyter==0.8.2
821
+ vaex-ml==0.18.3
822
+ vaex-server==0.9.0
823
+ vaex-viz==0.5.4
824
+ vaex==4.17.0
825
+ vec_noise==1.1.4
826
+ vecstack==0.4.0
827
+ virtualenv==20.21.0
828
+ visions==0.7.5
829
+ vowpalwabbit==9.9.0
830
+ vtk==9.3.0
831
+ wandb==0.16.6
832
+ wasabi==1.1.2
833
+ watchfiles==0.21.0
834
+ wavio==0.0.8
835
+ wcwidth==0.2.13
836
+ weasel==0.3.4
837
+ webcolors==1.13
838
+ webencodings==0.5.1
839
+ websocket-client==1.7.0
840
+ websockets==12.0
841
+ wfdb==4.1.2
842
+ whatthepatch==1.0.5
843
+ wheel==0.42.0
844
+ widgetsnbextension==3.6.6
845
+ witwidget==1.8.1
846
+ woodwork==0.30.0
847
+ wordcloud==1.9.3
848
+ wordsegment==1.3.1
849
+ wrapt==1.14.1
850
+ xarray-einstats==0.7.0
851
+ xarray==2024.3.0
852
+ xgboost==2.0.3
853
+ xvfbwrapper==0.2.9
854
+ xxhash==3.4.1
855
+ xyzservices==2024.4.0
856
+ y-py==0.6.2
857
+ yapf==0.40.2
858
+ yarl==1.9.3
859
+ yarl==1.9.4
860
+ ydata-profiling==4.6.4
861
+ yellowbrick==1.5
862
+ ypy-websocket==0.8.4
863
+ zict==3.0.0
864
+ zipp==3.17.0
865
+ zstandard==0.22.0
wandb/run-20240516_072542-5ru1r69k/files/wandb-metadata.json ADDED
@@ -0,0 +1,69 @@
+ {
+   "os": "Linux-5.15.133+-x86_64-with-glibc2.31",
+   "python": "3.10.13",
+   "heartbeatAt": "2024-05-16T07:25:43.262193",
+   "startedAt": "2024-05-16T07:25:42.951047",
+   "docker": null,
+   "cuda": null,
+   "args": [
+     "-f",
+     "/tmp/tmpqsb6llcs.json",
+     "--HistoryManager.hist_file=:memory:"
+   ],
+   "state": "running",
+   "program": "<python with no main file>",
+   "codePathLocal": null,
+   "host": "3064694ee8b3",
+   "username": "root",
+   "executable": "/opt/conda/bin/python",
+   "cpu_count": 2,
+   "cpu_count_logical": 4,
+   "cpu_freq": {
+     "current": 2000.142,
+     "min": 0.0,
+     "max": 0.0
+   },
+   "cpu_freq_per_core": [
+     {
+       "current": 2000.142,
+       "min": 0.0,
+       "max": 0.0
+     },
+     {
+       "current": 2000.142,
+       "min": 0.0,
+       "max": 0.0
+     },
+     {
+       "current": 2000.142,
+       "min": 0.0,
+       "max": 0.0
+     },
+     {
+       "current": 2000.142,
+       "min": 0.0,
+       "max": 0.0
+     }
+   ],
+   "disk": {
+     "/": {
+       "total": 8062.387607574463,
+       "used": 5611.894878387451
+     }
+   },
+   "gpu": "Tesla T4",
+   "gpu_count": 2,
+   "gpu_devices": [
+     {
+       "name": "Tesla T4",
+       "memory_total": 16106127360
+     },
+     {
+       "name": "Tesla T4",
+       "memory_total": 16106127360
+     }
+   ],
+   "memory": {
+     "total": 31.357559204101562
+   }
+ }
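For reference, the hardware snapshot above can be read back directly from the metadata file wandb stores with the run. A minimal sketch, assuming the repository is checked out locally so that the path shown in this diff exists; the field names are taken from the JSON above, and memory_total is reported in bytes (16106127360 bytes is 15.0 GiB per Tesla T4):

import json
from pathlib import Path

# Path as it appears in this commit; adjust to your local checkout.
meta_path = Path("wandb/run-20240516_072542-5ru1r69k/files/wandb-metadata.json")
meta = json.loads(meta_path.read_text())

print(f"host: {meta['host']}  python: {meta['python']}")
print(f"cpus: {meta['cpu_count']} physical / {meta['cpu_count_logical']} logical")
for gpu in meta.get("gpu_devices", []):
    # memory_total is in bytes; convert to GiB for readability
    print(f"gpu: {gpu['name']}  memory: {gpu['memory_total'] / 2**30:.1f} GiB")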
wandb/run-20240516_072542-5ru1r69k/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"train/loss": 1.2111, "train/grad_norm": 0.3055819571018219, "train/learning_rate": 0.0002, "train/epoch": 0.99, "train/global_step": 45, "_timestamp": 1715849610.9547303, "_runtime": 5267.9963991642, "_step": 45, "train_runtime": 5268.0425, "train_samples_per_second": 0.069, "train_steps_per_second": 0.009, "total_flos": 4.525072831473254e+16, "train_loss": 1.5121496227052478}
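As a rough consistency check on the figures above: with per_device_train_batch_size=2 and gradient_accumulation_steps=4 (see the TrainingArguments recorded in debug.log below) and assuming a single training process, each optimizer step consumes 8 samples, so 45 steps over roughly 5268 seconds give about 0.068 samples/s and 0.0085 steps/s, matching the logged 0.069 and 0.009 up to rounding and the partial final epoch (train/epoch = 0.99). A small sketch of the arithmetic:

# Rough throughput check against the summary values above (single-process run assumed).
per_device_batch, grad_accum = 2, 4      # from the TrainingArguments logged in debug.log below
steps, runtime_s = 45, 5268.0425         # train/global_step and train_runtime from the summary
samples_seen = steps * per_device_batch * grad_accum
print(samples_seen / runtime_s)          # ~0.068 samples/s (logged: 0.069)
print(steps / runtime_s)                 # ~0.0085 steps/s (logged: 0.009)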
wandb/run-20240516_072542-5ru1r69k/logs/debug-internal.log ADDED
The diff for this file is too large to render. See raw diff
 
wandb/run-20240516_072542-5ru1r69k/logs/debug.log ADDED
@@ -0,0 +1,30 @@
+ 2024-05-16 07:25:42,952 INFO MainThread:24 [wandb_setup.py:_flush():76] Current SDK version is 0.16.6
+ 2024-05-16 07:25:42,952 INFO MainThread:24 [wandb_setup.py:_flush():76] Configure stats pid to 24
+ 2024-05-16 07:25:42,952 INFO MainThread:24 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
+ 2024-05-16 07:25:42,952 INFO MainThread:24 [wandb_setup.py:_flush():76] Loading settings from /kaggle/working/wandb/settings
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program': '<python with no main file>'}
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_setup.py:_flush():76] Applying login settings: {'api_key': '***REDACTED***'}
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_setup.py:_flush():76] Applying login settings: {'api_key': '***REDACTED***'}
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_setup.py:_flush():76] Applying login settings: {}
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_init.py:_log_setup():521] Logging user logs to /kaggle/working/wandb/run-20240516_072542-5ru1r69k/logs/debug.log
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_init.py:_log_setup():522] Logging internal logs to /kaggle/working/wandb/run-20240516_072542-5ru1r69k/logs/debug-internal.log
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_init.py:init():561] calling init triggers
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_init.py:init():568] wandb.init called with sweep_config: {}
+ config: {}
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_init.py:init():611] starting backend
+ 2024-05-16 07:25:42,953 INFO MainThread:24 [wandb_init.py:init():615] setting up manager
+ 2024-05-16 07:25:42,955 INFO MainThread:24 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2024-05-16 07:25:42,958 INFO MainThread:24 [wandb_init.py:init():623] backend started and connected
+ 2024-05-16 07:25:42,962 INFO MainThread:24 [wandb_init.py:init():715] updated telemetry
+ 2024-05-16 07:25:42,967 INFO MainThread:24 [wandb_init.py:init():748] communicating run to backend with 90.0 second timeout
+ 2024-05-16 07:25:43,132 INFO MainThread:24 [wandb_run.py:_on_init():2357] communicating current version
+ 2024-05-16 07:25:43,218 INFO MainThread:24 [wandb_run.py:_on_init():2366] got version response upgrade_message: "wandb version 0.17.0 is available! To upgrade, please run:\n $ pip install wandb --upgrade"
+
+ 2024-05-16 07:25:43,220 INFO MainThread:24 [wandb_init.py:init():799] starting run threads in backend
+ 2024-05-16 07:25:59,293 INFO MainThread:24 [wandb_run.py:_console_start():2335] atexit reg
+ 2024-05-16 07:25:59,293 INFO MainThread:24 [wandb_run.py:_redirect():2190] redirect: wrap_raw
+ 2024-05-16 07:25:59,293 INFO MainThread:24 [wandb_run.py:_redirect():2255] Wrapping output streams.
+ 2024-05-16 07:25:59,294 INFO MainThread:24 [wandb_run.py:_redirect():2280] Redirects installed.
+ 2024-05-16 07:25:59,295 INFO MainThread:24 [wandb_init.py:init():842] run started, returning control to user process
+ 2024-05-16 07:25:59,302 INFO MainThread:24 [wandb_run.py:_config_callback():1347] config_cb None None {'ignore_index': -100, 'image_token_index': 32000, 'projector_hidden_act': 'gelu', 'vision_feature_select_strategy': 'default', 'vision_feature_layer': -2, 'vision_config': {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': None, 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': None, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': '', 'model_type': 'clip_vision_model', 'vocab_size': 32000, 'hidden_size': 1024, 'intermediate_size': 4096, 'projection_dim': 768, 'num_hidden_layers': 24, 'num_attention_heads': 16, 'num_channels': 3, 'patch_size': 14, 'image_size': 336, 'initializer_range': 0.02, 'initializer_factor': 1.0, 'attention_dropout': 0.0, 'layer_norm_eps': 1e-05, 'hidden_act': 'quick_gelu'}, 'text_config': {'vocab_size': 32064, 'max_position_embeddings': 4096, 'hidden_size': 4096, 'intermediate_size': 11008, 'num_hidden_layers': 32, 'num_attention_heads': 32, 'num_key_value_heads': 32, 'hidden_act': 'silu', 'initializer_range': 0.02, 'rms_norm_eps': 1e-05, 'pretraining_tp': 1, 'use_cache': True, 'rope_theta': 10000.0, 'rope_scaling': None, 'attention_bias': False, 'attention_dropout': 0.0, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': False, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['LlamaForCausalLM'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 
'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': None, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'lmsys/vicuna-7b-v1.5', 'model_type': 'llama'}, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'bfloat16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': False, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['LlavaForConditionalGeneration'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': 32001, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'llava-hf/llava-1.5-7b-hf', 'transformers_version': '4.39.3', 'model_type': 'llava', 'quantization_config': {'quant_method': 'QuantizationMethod.BITS_AND_BYTES', '_load_in_8bit': False, '_load_in_4bit': True, 'llm_int8_threshold': 6.0, 'llm_int8_skip_modules': None, 'llm_int8_enable_fp32_cpu_offload': False, 'llm_int8_has_fp16_weight': False, 'bnb_4bit_quant_type': 'nf4', 'bnb_4bit_use_double_quant': True, 'bnb_4bit_compute_dtype': 'bfloat16', 'bnb_4bit_quant_storage': 'uint8', 'load_in_4bit': True, 'load_in_8bit': False}, 'output_dir': './', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': False, 'do_predict': False, 'evaluation_strategy': 'no', 'prediction_loss_only': False, 'per_device_train_batch_size': 2, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 4, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 0.0002, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 1, 'max_steps': -1, 'lr_scheduler_type': 'constant', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': './runs/May16_07-25-42_3064694ee8b3', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 1, 'logging_nan_inf_filter': True, 'save_strategy': 'epoch', 'save_steps': 500, 'save_total_limit': 3, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 
'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': None, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': './', 'disable_tqdm': False, 'remove_unused_columns': False, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_bnb_8bit', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': False, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None}
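The config_cb entry above records the full setup for this run: llava-hf/llava-1.5-7b-hf loaded in 4-bit NF4 with double quantization and bfloat16 compute, a LoRA adapter trained for one epoch at a constant learning rate of 2e-4 with per-device batch size 2, gradient accumulation 4, fp16, the adamw_bnb_8bit optimizer, checkpoints saved per epoch (keeping 3), and logging to tensorboard and wandb. Below is a minimal sketch of how such a run is typically assembled with transformers 4.39.3, peft 0.10.0 and bitsandbytes (the quantization backend named in the config); the LoRA rank, alpha and target modules, the processor and dataset handling, and the choice of Trainer wrapper are assumptions, not read from this commit:

import torch
from transformers import (AutoProcessor, BitsAndBytesConfig,
                          LlavaForConditionalGeneration, TrainingArguments)
from peft import LoraConfig, get_peft_model

model_id = "llava-hf/llava-1.5-7b-hf"

# 4-bit NF4 quantization with double quantization and bfloat16 compute,
# mirroring the quantization_config recorded in the run config above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

processor = AutoProcessor.from_pretrained(model_id)
model = LlavaForConditionalGeneration.from_pretrained(
    model_id, quantization_config=bnb_config, device_map="auto"
)

# LoRA adapter; rank, alpha, dropout and target modules are illustrative assumptions.
lora_config = LoraConfig(
    r=8, lora_alpha=16, lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()

# Hyperparameters as logged in the config_cb entry above.
training_args = TrainingArguments(
    output_dir="./",
    num_train_epochs=1,
    per_device_train_batch_size=2,
    gradient_accumulation_steps=4,
    learning_rate=2e-4,
    lr_scheduler_type="constant",
    fp16=True,
    optim="adamw_bnb_8bit",
    logging_steps=1,
    save_strategy="epoch",
    save_total_limit=3,
    report_to=["tensorboard", "wandb"],
)
# A Trainer (or trl's SFTTrainer, which is also installed above) would then be built
# from these arguments plus a dataset and collator, which this commit does not include.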
wandb/run-20240516_072542-5ru1r69k/run-5ru1r69k.wandb ADDED
Binary file (172 kB). View file