#--- Mergekit Example: della ---
# Method: DELLA merging - adaptive, magnitude-based pruning of the delta parameters
# (building on DARE), with the surviving deltas rescaled and linearly combined onto the base model.
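# Rough sketch of how the per-model settings below are used (paraphrasing the
# mergekit docs; exact formulas may vary between versions):
#   - density: fraction of each model's delta (model minus base) to retain.
#   - epsilon: how far a parameter's drop probability may deviate from (1 - density);
#     larger-magnitude deltas are assigned lower drop probabilities.
#   - weight:  coefficient for that model's rescaled delta in the final linear combination.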
base_model: /media/administrator/oiseauxai1data1/modelout/Smart-base-v2
models:
  - model: /media/administrator/oiseauxai1data1/modelout/Dark-Base-V1
    parameters:
      weight: 0.6
      density: 0.95
      epsilon: 0.018 # <-- Epsilon for this model
  - model: /media/administrator/oiseauxai1data/modelout/Story-Base-V2
    parameters:
      weight: 0.3
      density: 0.80
      epsilon: 0.018 # <-- Epsilon for this model
  - model: /media/administrator/oiseauxai1data1/modelout/Middle-Base-V1
    parameters:
      weight: 0.3
      density: 0.80
      epsilon: 0.018 # <-- Epsilon for this model
model_name: L3.3-70b-Amalgamma-V2 # Name of your merge
dtype: float32       # dtype used for the merge computation (float32, float16, bfloat16)
out_dtype: bfloat16  # dtype the merged weights are saved in (float32, float16, bfloat16)
merge_method: della
parameters: # Global parameters for the merge method itself
  normalize: false
  # epsilon is set per-model above; do not set a global epsilon here
  lambda: 1.20
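# With normalize: false the model weights above (0.6 + 0.3 + 0.3 = 1.2) are used
# as-is rather than rescaled to sum to 1; lambda then scales the combined deltas
# before they are added back onto the base model.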
tokenizer_source: base # 'base' uses the base_model's tokenizer; 'union' combines all vocabularies - use with care
chat_template: llama3 # Chat template to embed (chatml, llama3, etc.)
license: apache-2.0 # License type
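# To run the merge (assuming mergekit is installed; the filename and output path
# below are illustrative, not part of the original config):
#   mergekit-yaml della-merge.yml ./L3.3-70b-Amalgamma-V2 --cuda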