8-bit Precision Training of GLM3


Inspecting the data format

from transformers import AutoTokenizer, AutoModel
import torch

tokenizer = AutoTokenizer.from_pretrained("./ZhipuAI/chatglm3-6b/", trust_remote_code=True)
tokenizer

model = AutoModel.from_pretrained("./ZhipuAI/chatglm3-6b/", trust_remote_code=True, low_cpu_mem_usage=True, torch_dtype=torch.half, device_map="auto")

model.chat(tokenizer, "考试的技巧有哪些?", history=[])
?model.chat                   # IPython help: inspect the chat() signature
?tokenizer.build_chat_input   # IPython help: inspect how chat inputs are built

tokenizer.build_chat_input("考试的技巧有哪些?", history=[], role="user")
tokenizer.decode([64790, 64792, 64795, 30910,    13, 30910, 32227, 54530, 33741, 34953, 31514, 64796])

# Data format:
# [gMASK]sop<|user|> \n Prompt<|assistant|> \n Response eos_token
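Before writing the preprocessing function, it helps to confirm this layout end to end. A minimal sketch that appends a response plus the eos token to the prompt ids and decodes the result; the answer text here is made up purely for illustration:

prompt_ids = tokenizer.build_chat_input("考试的技巧有哪些?", history=[], role="user")["input_ids"][0].tolist()
response_ids = tokenizer("\n" + "多做真题,限时训练。", add_special_tokens=False)["input_ids"]  # hypothetical answer
full_ids = prompt_ids + response_ids + [tokenizer.eos_token_id]
print(tokenizer.decode(full_ids))  # should render as: [gMASK]sop<|user|> \n prompt<|assistant|> \n response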

1. Import the required packages

from datasets import Dataset
from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForSeq2Seq, TrainingArguments, Trainer, pipeline

2. Load, split, and process the dataset

# Load
ds = Dataset.load_from_disk("./data/alpaca_data_zh/")
# Split
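The split step is left empty above; a minimal sketch using the datasets library's train_test_split, assuming a 10% held-out set is wanted (illustration only; the rest of this walkthrough keeps training on the full ds):

split = ds.train_test_split(test_size=0.1, seed=42)   # DatasetDict with "train" and "test"
train_ds, eval_ds = split["train"], split["test"]     # not used below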

# Data processing
tokenizer = AutoTokenizer.from_pretrained("./ZhipuAI/chatglm3-6b-base/", trust_remote_code=True)
def process_func(example):
    MAX_LENGTH = 256
    input_ids, attention_mask, labels = [], [], []
    instruction = "\n".join([example["instruction"], example["input"]]).strip()     # query
    instruction = tokenizer.build_chat_input(instruction, history=[], role="user")  # [gMASK]sop<|user|> \n query<|assistant|>
    response = tokenizer("\n" + example["output"], add_special_tokens=False)        # \n response, 缺少eos token
    input_ids = instruction["input_ids"][0].tolist() + response["input_ids"] + [tokenizer.eos_token_id]
    attention_mask = instruction["attention_mask"][0].tolist() + response["attention_mask"] + [1]
    # mask the prompt tokens with -100 so the loss is computed only on the response
    labels = [-100] * len(instruction["input_ids"][0]) + response["input_ids"] + [tokenizer.eos_token_id]
    if len(input_ids) > MAX_LENGTH:
        input_ids = input_ids[:MAX_LENGTH]
        attention_mask = attention_mask[:MAX_LENGTH]
        labels = labels[:MAX_LENGTH]
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "labels": labels
    }

tokenized_ds = ds.map(process_func, remove_columns=ds.column_names)    
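A quick sanity check on one processed sample: decode the full inputs, then only the non-masked labels, to confirm that just the response (plus eos) contributes to the loss:

print(tokenizer.decode(tokenized_ds[0]["input_ids"]))
print(tokenizer.decode([t for t in tokenized_ds[0]["labels"] if t != -100]))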

3. Create the model

import torch
# In a multi-GPU setup you can drop device_map="auto"; otherwise the model gets sharded across the cards
model = AutoModelForCausalLM.from_pretrained("./ZhipuAI/chatglm3-6b-base/", trust_remote_code=True, low_cpu_mem_usage=True, 
                                             torch_dtype=torch.bfloat16, device_map="auto", load_in_8bit=True)
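Note that newer transformers releases deprecate passing load_in_8bit directly in favor of a BitsAndBytesConfig, and PEFT recommends preparing a quantized model before attaching adapters. A minimal alternative sketch under those assumptions, using the same local path:

from transformers import BitsAndBytesConfig
from peft import prepare_model_for_kbit_training

model = AutoModelForCausalLM.from_pretrained(
    "./ZhipuAI/chatglm3-6b-base/",
    trust_remote_code=True,
    low_cpu_mem_usage=True,
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    device_map="auto",
)
model = prepare_model_for_kbit_training(model)  # upcasts norm layers, enables input grads for k-bit training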

LoRA

# Install: pip install peft

PEFT 1. Configuration

from peft import LoraConfig, TaskType, get_peft_model

config = LoraConfig(task_type=TaskType.CAUSAL_LM,                 # TaskType is imported above: causal-LM fine-tuning
                    target_modules=["query_key_value"],           # ChatGLM3's fused QKV projection
                    modules_to_save=["post_attention_layernorm"]) # also fully train and save these layers

PEFT 2. Create the PEFT model

model = get_peft_model(model, config)
# show the share of trainable parameters
# model.print_trainable_parameters()

4. Create an evaluation function

5. Create TrainingArguments and the Trainer

args = TrainingArguments(
    output_dir="./chatbot",
    per_device_train_batch_size=1,
    gradient_accumulation_steps=16,   # effective batch size of 16 per device
    logging_steps=10,
    num_train_epochs=1,
    learning_rate=1e-4,
    remove_unused_columns=False,      # keep the columns produced by process_func
    save_strategy="epoch"
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=tokenized_ds.select(range(6000)),   # train on the first 6000 samples
    data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True),  # pads labels with -100 so padding is ignored by the loss
)

6. Train the model

trainer.train()

7. Inference

model.eval()
# chat() returns (response, history); print only the response
print(model.chat(tokenizer, "数学考试怎么考高分?", history=[])[0])
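After training, the saved LoRA adapter can also be reattached to a freshly loaded base model for inference. A minimal sketch; the checkpoint folder name depends on the actual step count, so the path below is hypothetical:

from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("./ZhipuAI/chatglm3-6b-base/", trust_remote_code=True,
                                            torch_dtype=torch.half, device_map="auto")
infer_model = PeftModel.from_pretrained(base, "./chatbot/checkpoint-375")  # hypothetical checkpoint dir
infer_model.eval()
print(infer_model.chat(tokenizer, "数学考试怎么考高分?", history=[])[0])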