1. Import the required packages
from datasets import Dataset
from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForSeq2Seq, TrainingArguments, Trainer, pipeline
2. Load, split, and preprocess the dataset
# Load
ds = Dataset.load_from_disk("./data/alpaca_data_zh/")
# Split (optional; the rest of this walkthrough trains on the full dataset, see the sketch below)
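If a hold-out set is wanted for evaluation later, one option (an added sketch, not part of the original recipe; the names split, train_ds and eval_ds are my own) is Dataset.train_test_split:
# Optional 90/10 split; the code below keeps using the full ds
split = ds.train_test_split(test_size=0.1, seed=42)
train_ds, eval_ds = split["train"], split["test"]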
# Preprocess / tokenize
tokenizer = AutoTokenizer.from_pretrained("Langboat/bloom-1b4-zh")
def process_func(example):
    MAX_LENGTH = 256
    # Tokenize the prompt (instruction + optional input) and the response separately
    instruction = tokenizer("\n".join(["Human: " + example["instruction"], example["input"]]).strip() + "\n\nAssistant: ")
    response = tokenizer(example["output"] + tokenizer.eos_token)
    # Concatenate prompt and response; mask the prompt tokens out of the labels with -100
    input_ids = instruction["input_ids"] + response["input_ids"]
    attention_mask = instruction["attention_mask"] + response["attention_mask"]
    labels = [-100] * len(instruction["input_ids"]) + response["input_ids"]
    # Truncate to the maximum sequence length
    if len(input_ids) > MAX_LENGTH:
        input_ids = input_ids[:MAX_LENGTH]
        attention_mask = attention_mask[:MAX_LENGTH]
        labels = labels[:MAX_LENGTH]
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "labels": labels
    }
tokenized_ds = ds.map(process_func, remove_columns=ds.column_names)
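A quick sanity check (an added sketch, not one of the original steps) decodes one processed example to confirm that only the response tokens carry labels:
sample = tokenized_ds[0]
print(tokenizer.decode(sample["input_ids"]))                            # full prompt + response
print(tokenizer.decode([t for t in sample["labels"] if t != -100]))     # response only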
3. Create the model
model = AutoModelForCausalLM.from_pretrained("Langboat/bloom-1b4-zh", low_cpu_mem_usage=True)
# Count the model parameters
total_params = sum(param.numel() for param in model.parameters())
total_params
# GPU memory estimate for full fine-tuning of the 1b4 model (fp32 weights, Adam-style optimizer)
# model size: ~1.3B parameters
# weights:    1.3B * 4 bytes ≈ 5.2 GB
# gradients:  1.3B * 4 bytes ≈ 5.2 GB
# optimizer states: 1.3B * 4 bytes * 2 ≈ 10.4 GB
# total: ≈ 20.8 GB (activations not included)
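The same estimate can be reproduced from the parameter count; this is a rough sketch that assumes fp32 weights plus two fp32 Adam states per parameter and ignores activations and buffers:
n_params = sum(p.numel() for p in model.parameters())   # ~1.3B for bloom-1b4-zh
bytes_fp32 = 4
weights_gb   = n_params * bytes_fp32 / 1e9           # model weights
gradients_gb = n_params * bytes_fp32 / 1e9           # gradients
optimizer_gb = n_params * bytes_fp32 * 2 / 1e9       # Adam first/second moments
print(f"~{weights_gb + gradients_gb + optimizer_gb:.1f} GB for full fine-tuning")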
BitFit
# Freeze every parameter except the bias terms
num_param = 0
for name, param in model.named_parameters():
    if "bias" not in name:
        param.requires_grad = False
    else:
        num_param += param.numel()
# Fraction of parameters that remain trainable
num_param / total_params
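To double-check that the freeze took effect, the same ratio can be recomputed from the requires_grad flags (a small added verification, not part of the original recipe):
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())
print(f"trainable (bias) params: {trainable} / {total} = {trainable / total:.4%}")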
4. Create an evaluation function
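No custom metric is wired up here; for causal-LM fine-tuning the eval loss reported by trainer.evaluate() is usually enough. If perplexity is wanted, a tiny helper (an added sketch, the helper name is my own) can derive it from that loss:
import math

def perplexity(eval_metrics):
    # eval_metrics is the dict returned by trainer.evaluate(), e.g. {"eval_loss": ...}
    return math.exp(eval_metrics["eval_loss"])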
5. Create the TrainingArguments and Trainer
args = TrainingArguments(
    output_dir="./chatbot",
    per_device_train_batch_size=1,
    gradient_accumulation_steps=8,   # effective batch size = 1 * 8 = 8
    logging_steps=10,
    num_train_epochs=1
)
trainer = Trainer(
    model=model,
    args=args,
    tokenizer=tokenizer,
    train_dataset=tokenized_ds,
    data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True),
)
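DataCollatorForSeq2Seq is used even though this is a causal LM because it also pads the labels field, filling padded positions with -100 so they are ignored by the loss. A quick added check (not an original step) makes that visible:
batch = trainer.data_collator([tokenized_ds[0], tokenized_ds[1]])
print(batch["labels"][:, -5:])   # padded label positions appear as -100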
6. Train, evaluate, and predict with the model
# Train
trainer.train()
# Evaluate (this requires an eval_dataset to be passed to the Trainer, e.g. a hold-out split from step 2)
trainer.evaluate()
7. Inference
model = model.cuda()
ipt = tokenizer("Human: {}\n{}".format("考试有哪些技巧?", "").strip() + "\n\nAssistant: ", return_tensors="pt").to(model.device)
tokenizer.decode(model.generate(**ipt, max_length=128, do_sample=True)[0], skip_special_tokens=True)
Inference with the pipeline API
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0)
ipt = "Human: {}\n{}".format("考试有哪些技巧?", "").strip() + "\n\nAssistant: "
pipe(ipt, max_length=256, do_sample=True)
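The pipeline returns a list of dicts, one per generated sequence, with the text (prompt included) under the "generated_text" key:
result = pipe(ipt, max_length=256, do_sample=True)
print(result[0]["generated_text"])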