merge_method: model_stock
models:
  - model: hitachi-nlp/Llama-3.1-70B-FLDx2
    parameters:
      weight: 1.0
  - model: migtissera/Tess-3-Llama-3.1-70B
    parameters:
      weight: 1.0
base_model: nbeerbower/Llama3.1-Gutenberg-Doppel-70B
dtype: bfloat16
out_dtype: bfloat16
parameters:
  int8_mask: true
  normalize: true
  rescale: false
  filter_wise: false
  smooth: false
  allow_negative_weights: false
chat_template: auto
tokenizer:
  source: union
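
For reference, a merge like this can be run with mergekit. The sketch below assumes the config above is saved as `config.yaml` and uses mergekit's Python entry points (`MergeConfiguration`, `run_merge`, `MergeOptions`); the output path and option values are illustrative, not part of the original recipe.

```python
# Minimal sketch: apply the model_stock config above via mergekit's Python API.
# Assumes the YAML is saved as config.yaml; output directory name is hypothetical.
import yaml
import torch

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

with open("config.yaml", "r", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

run_merge(
    merge_config,
    out_path="./merged-llama-3.1-70b",   # hypothetical output directory
    options=MergeOptions(
        cuda=torch.cuda.is_available(),  # use a GPU for the merge if one is present
        copy_tokenizer=True,             # write the (union) tokenizer into the output
        lazy_unpickle=True,              # lower peak memory while loading shards
        low_cpu_memory=True,
    ),
)
```

The equivalent CLI invocation would be `mergekit-yaml config.yaml ./merged-llama-3.1-70b`, with flags such as `--cuda` as needed.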