End of training

Files changed:
- README.md +3 -1
- config.json +36 -0
 
    	
README.md CHANGED

```diff
@@ -1,9 +1,11 @@
 ---
 base_model: rkumar1999/Llama3.2-3B-Prover-openr1-distill-SFT
+datasets: rkumar1999/OBT-proof-short-60-n10000
 library_name: transformers
 model_name: Llama3.2-3B-Prover-openr1-distill-GRPO
 tags:
 - generated_from_trainer
+- open-r1
 - trl
 - grpo
 licence: license
@@ -11,7 +13,7 @@ licence: license
 
 # Model Card for Llama3.2-3B-Prover-openr1-distill-GRPO
 
-This model is a fine-tuned version of [rkumar1999/Llama3.2-3B-Prover-openr1-distill-SFT](https://huggingface.co/rkumar1999/Llama3.2-3B-Prover-openr1-distill-SFT).
+This model is a fine-tuned version of [rkumar1999/Llama3.2-3B-Prover-openr1-distill-SFT](https://huggingface.co/rkumar1999/Llama3.2-3B-Prover-openr1-distill-SFT) on the [rkumar1999/OBT-proof-short-60-n10000](https://huggingface.co/datasets/rkumar1999/OBT-proof-short-60-n10000) dataset.
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
```
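The "## Quick start" body itself is not part of this diff. For reference, loading the GRPO checkpoint named in the card follows the usual TRL model-card pattern (a minimal sketch, assuming the repo id above is publicly readable and a CUDA device is available; the question string is illustrative, not taken from the card):

```python
# Minimal sketch of the usual TRL quick-start pattern;
# the question text is illustrative, not from the model card.
from transformers import pipeline

question = "Prove that the sum of two even integers is even."
generator = pipeline(
    "text-generation",
    model="rkumar1999/Llama3.2-3B-Prover-openr1-distill-GRPO",
    device="cuda",
)
output = generator(
    [{"role": "user", "content": question}],
    max_new_tokens=256,
    return_full_text=False,
)[0]
print(output["generated_text"])
```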
    	
config.json ADDED

```diff
@@ -0,0 +1,36 @@
+{
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 128000,
+  "dtype": "bfloat16",
+  "eos_token_id": 128009,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 3072,
+  "initializer_range": 0.02,
+  "intermediate_size": 8192,
+  "max_position_embeddings": 131072,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 24,
+  "num_hidden_layers": 28,
+  "num_key_value_heads": 8,
+  "pad_token_id": 128009,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": {
+    "factor": 32.0,
+    "high_freq_factor": 4.0,
+    "low_freq_factor": 1.0,
+    "original_max_position_embeddings": 8192,
+    "rope_type": "llama3"
+  },
+  "rope_theta": 500000.0,
+  "tie_word_embeddings": true,
+  "transformers_version": "4.57.0",
+  "use_cache": true,
+  "vocab_size": 128256
+}
```
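Since config.json is newly added by this commit, one way to sanity-check that the uploaded repo resolves to the intended Llama 3.2 3B architecture is to load it with transformers' AutoConfig and compare a few fields against the diff above (a minimal sketch; assumes the repo is publicly readable):

```python
# Minimal sketch: fetch the config added in this commit and verify
# a few of its values against what the diff shows.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("rkumar1999/Llama3.2-3B-Prover-openr1-distill-GRPO")

assert cfg.model_type == "llama"
assert cfg.hidden_size == 3072 and cfg.num_hidden_layers == 28
# 24 query heads sharing 8 KV heads, i.e. grouped-query attention
assert cfg.num_attention_heads == 24 and cfg.num_key_value_heads == 8
# llama3 RoPE scaling stretches the 8192-token pretraining window to 131072
assert cfg.rope_scaling["rope_type"] == "llama3"
assert cfg.max_position_embeddings == 131072
print(cfg)
```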