pere committed
Commit f2a6206 · 1 Parent(s): 86bbb27

Saving weights and logs of step 10000
events.out.tfevents.1642108860.t1v-n-6f5efcd5-w-0.604413.0.v2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ab5fc1b7b81c77898124077e503b27d796784c21a8862243e091545846c1404
+ size 1470136
flax_model.msgpack CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ef44b87241742f4f804608e53a522ca494c43160f37a11baebfe8203d1b82c3a
+ oid sha256:7cdf5da2a3aae0b2ad6669aebb07d2b02b88c0103039ec07d2220717a4667368
  size 498796983
run_mlm_flax.py CHANGED
@@ -129,6 +129,10 @@ class DataTrainingArguments:
     static_learning_rate: bool = field(
         default=False, metadata={"help": "Use a non decaying learning rate"}
     )
+    end_learning_rate: float = field(
+        default=0, metadata={"help": "End learning rate. Will be ignored if static learning rate is set"}
+    )
+
     auth_token: bool = field(
         default=False, metadata={"help": "Use authorisation token"}
     )
@@ -510,6 +514,8 @@ if __name__ == "__main__":

     if data_args.static_learning_rate:
         end_lr_value = training_args.learning_rate
+    elif data_args.end_learning_rate:
+        end_lr_value = data_args.end_learning_rate
     else:
         end_lr_value = 0

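The hunk above only shows where end_lr_value is computed, not where it is consumed. In the upstream run_mlm_flax.py the schedule is a linear warmup joined to a linear decay that ends at 0; the sketch below shows how end_lr_value would presumably replace that constant. This is a minimal sketch, not the committed code, and the function name create_learning_rate_fn plus its parameters are assumed to match this repository's copy of the upstream script.

import optax

# Sketch (assumed wiring, not the committed code): feed end_lr_value into the
# warmup + linear-decay schedule used by the upstream run_mlm_flax.py.
def create_learning_rate_fn(num_train_steps, num_warmup_steps, learning_rate, end_lr_value):
    # Linear warmup from 0 to the peak learning rate over num_warmup_steps.
    warmup_fn = optax.linear_schedule(
        init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps
    )
    # Linear decay from the peak down to end_lr_value (0 in the stock script;
    # equal to the peak when --static_learning_rate is set, i.e. a flat schedule).
    decay_fn = optax.linear_schedule(
        init_value=learning_rate,
        end_value=end_lr_value,
        transition_steps=num_train_steps - num_warmup_steps,
    )
    # Use the warmup schedule for the first num_warmup_steps, then the decay schedule.
    return optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps])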
run_step5.sh ADDED
@@ -0,0 +1,27 @@
+ ./run_mlm_flax.py \
+     --output_dir="./" \
+     --model_type="roberta" \
+     --model_name_or_path="./" \
+     --config_name="./" \
+     --tokenizer_name="./" \
+     --train_file /mnt/disks/flaxdisk/corpus/train_1_4.json \
+     --validation_file /mnt/disks/flaxdisk/corpus/validation.json \
+     --cache_dir="/mnt/disks/flaxdisk/cache/" \
+     --max_seq_length="128" \
+     --weight_decay="0.01" \
+     --per_device_train_batch_size="200" \
+     --per_device_eval_batch_size="200" \
+     --learning_rate="6e-4" \
+     --warmup_steps="0" \
+     --overwrite_output_dir \
+     --num_train_epochs="6" \
+     --adam_beta1="0.9" \
+     --adam_beta2="0.98" \
+     --logging_steps="10000" \
+     --save_steps="10000" \
+     --eval_steps="10000" \
+     --preprocessing_num_workers="64" \
+     --auth_token="True" \
+     --dtype="bfloat16" \
+     --adafactor \
+     --push_to_hub