---
# mergekit configuration — SCE merge of six 70B LLaMa variants.
# Each model carries a per-layer-group `select_topk` schedule (six values);
# the schedules are staggered so each model dominates a different depth band
# (Sapphire strongest early, Emerald strongest late).
# NOTE(review): per-model `select_topk` lists assume the installed mergekit
# version supports layer-wise SCE parameters — confirm against its docs.
models:
  - model: TareksLab/Emerald-V2-LLaMa-70B
    parameters:
      select_topk: [0.1, 0.1, 0.1, 0.1, 0.2, 0.5]
  - model: TareksLab/Carnelian-V2-LLaMa-70B
    parameters:
      select_topk: [0.1, 0.1, 0.1, 0.2, 0.4, 0.2]
  - model: TareksLab/Ruby-V2-LLaMa-70B
    parameters:
      select_topk: [0.1, 0.1, 0.2, 0.4, 0.2, 0.1]
  - model: TareksLab/Amethyst-V2-LLaMa-70B
    parameters:
      select_topk: [0.1, 0.2, 0.4, 0.2, 0.1, 0.1]
  - model: TareksLab/Citrine-V2-LLaMa-70B
    parameters:
      select_topk: [0.2, 0.4, 0.2, 0.1, 0.1, 0.1]
  - model: TareksLab/Sapphire-V2-LLaMa-70B
    parameters:
      select_topk: [0.5, 0.2, 0.1, 0.1, 0.1, 0.1]

# Emerald also serves as the base (it appears in `models` too, with a
# late-layer-weighted schedule).
base_model: TareksLab/Emerald-V2-LLaMa-70B
merge_method: sce

parameters:
  int8_mask: true

# Merge in float32, write the result as bfloat16.
dtype: float32
out_dtype: bfloat16

chat_template: llama3

# Tokenizer is taken from Ruby, not from the base model.
tokenizer:
  source: TareksLab/Ruby-V2-LLaMa-70B
  pad_to_multiple_of: 8