Half-Precision Training of Llama

1. Import the required packages

from datasets import Dataset
from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForSeq2Seq, TrainingArguments, Trainer, pipeline

2. Load, split, and preprocess the dataset

# Load
ds = Dataset.load_from_disk("./data/alpaca_data_zh/")
# Split (skipped here; only a subset is selected later via tokenized_ds.select)

# Preprocess
tokenizer = AutoTokenizer.from_pretrained("./modelscope/Llama-2-7b-ms")
tokenizer.padding_side = "right"  # padding_side must be "right", otherwise training may not converge when the batch size is greater than 1
tokenizer.pad_token_id = 2        # pad with the eos token id (2 for Llama-2); see note 2 at the end
def process_func(example):
    MAX_LENGTH = 1024    # the Llama tokenizer splits a single Chinese character into several tokens, so allow a generous max length to keep examples intact
    input_ids, attention_mask, labels = [], [], []
    instruction = tokenizer("\n".join(["Human: " + example["instruction"], example["input"]]).strip() + "\n\nAssistant: ", add_special_tokens=False)
    response = tokenizer(example["output"], add_special_tokens=False)
    input_ids = instruction["input_ids"] + response["input_ids"] + [tokenizer.eos_token_id]
    attention_mask = instruction["attention_mask"] + response["attention_mask"] + [1]
    labels = [-100] * len(instruction["input_ids"]) + response["input_ids"] + [tokenizer.eos_token_id]  # mask the prompt so the loss is computed only on the response
    if len(input_ids) > MAX_LENGTH:
        input_ids = input_ids[:MAX_LENGTH]
        attention_mask = attention_mask[:MAX_LENGTH]
        labels = labels[:MAX_LENGTH]
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "labels": labels
    }

tokenized_ds = ds.map(process_func, remove_columns=ds.column_names)    
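
A quick sanity check (an addition, not in the original) is to decode one processed example and confirm the prompt/response template and the -100 label mask:

print(tokenizer.decode(tokenized_ds[0]["input_ids"]))                         # full prompt + response + eos
print(tokenizer.decode([t for t in tokenized_ds[0]["labels"] if t != -100]))  # only the part that contributes to the loss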

3. Create the model

import torch
# In a multi-GPU setup, you can drop device_map="auto"; otherwise the model will be split across the GPUs
model = AutoModelForCausalLM.from_pretrained("./modelscope/Llama-2-7b-ms", low_cpu_mem_usage=True, torch_dtype=torch.bfloat16, device_map="auto")
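
A small check (an addition, not from the original) to confirm the dtype actually in use and the rough memory footprint; in bfloat16 the 7B weights take about 2 bytes per parameter:

print(model.dtype)                                         # should report torch.bfloat16
print(f"{model.get_memory_footprint() / 1024**3:.1f} GB")  # ~2 bytes per parameter in bf16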

LoRA

# Install the PEFT library: pip install peft

PEFT Step 1: Configuration

from peft import LoraConfig, TaskType, get_peft_model

config = LoraConfig(task_type=TaskType.CAUSAL_LM,)
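
The line above relies entirely on PEFT's defaults. A more explicit configuration might look like the following; the hyperparameter values are illustrative, not taken from the original:

config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=8,                                  # rank of the low-rank update matrices (illustrative)
    lora_alpha=32,                        # scaling factor for the update (illustrative)
    lora_dropout=0.1,
    target_modules=["q_proj", "v_proj"],  # Llama attention projections
)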

PEFT Step 2: Create the PEFT model

model = get_peft_model(model, config)
# Print the share of trainable parameters
# model.print_trainable_parameters()

4. Create an evaluation function
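
The original leaves this step empty. If evaluation is wanted, one option (a sketch assuming you hold out part of the data as an eval split) is to rely on the eval loss reported by the Trainer:

# Hypothetical: reserve a slice as an eval split, pass it as eval_dataset to the Trainer below,
# and call trainer.evaluate(); perplexity can then be computed as exp(eval_loss).
# eval_ds = tokenized_ds.select(range(6000, 6500))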

5. Create TrainingArguments and the Trainer

model.enable_input_require_grads()  # required when gradient checkpointing is enabled
model = model.half()                # cast the weights to float16 for half-precision training

args = TrainingArguments(
    output_dir="./chatbot",
    per_device_train_batch_size=1,
    gradient_accumulation_steps=8,
    logging_steps=10,
    num_train_epochs=1,
    gradient_checkpointing=True,  # gradient checkpointing to save memory
    adam_epsilon=1e-4             # the default 1e-8 underflows to 0 in fp16
)
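
The unusually large adam_epsilon is tied to model.half() above: the optimizer default of 1e-8 cannot be represented in fp16, which a two-line check (an illustration, not part of the training script) makes visible:

print(torch.tensor(1e-8, dtype=torch.float16))  # underflows to 0, so it no longer guards Adam's division
print(torch.tensor(1e-4, dtype=torch.float16))  # representable in fp16, hence adam_epsilon=1e-4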

trainer = Trainer(
    model=model,
    args=args,
    tokenizer=tokenizer,
    train_dataset=tokenized_ds.select(range(6000)),  # train on the first 6000 examples only
    data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True),
)

6. Train the model

trainer.train()

7. Inference

model.eval()
# Build the same prompt template as in training, then generate and decode
ipt = tokenizer("Human: {}\n{}".format("你好", "").strip() + "\n\nAssistant: ", return_tensors="pt").to(model.device)
tokenizer.decode(model.generate(**ipt, max_length=512, do_sample=True, eos_token_id=tokenizer.eos_token_id)[0], skip_special_tokens=True)
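
pipeline is imported at the top but never used; an equivalent way to run inference is sketched below. Depending on the transformers/peft versions, the LoRA-wrapped model may first need model = model.merge_and_unload() before being handed to the pipeline:

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
print(pipe("Human: 你好\n\nAssistant: ", max_length=256, do_sample=True)[0]["generated_text"])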

Notes

  1. The Llama 2 tokenizer splits an eos_token that does not stand alone in the text into ordinary pieces, so the eos_token has to be handled separately (appended as a token id, as in process_func); otherwise the trained model never learns when to stop generating (see the short check below).
  2. For half-precision training, once the eos_token is added correctly, the pad_token_id must also be set to the eos_token_id; otherwise the model may fail to converge.
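
A quick way to see note 1 in action (a check, not part of the training flow) is to tokenize the eos token embedded in the text and compare it with appending tokenizer.eos_token_id by hand, as process_func does:

print(tokenizer("你好" + tokenizer.eos_token, add_special_tokens=False)["input_ids"])
print(tokenizer("你好", add_special_tokens=False)["input_ids"] + [tokenizer.eos_token_id])
# If the first line does not end in the single eos id, the embedded eos token was split into text pieces.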