```python
# Model
from transformers import AutoModelForCausalLM

model_path = 'D://qwen2/dir'
base_model = AutoModelForCausalLM.from_pretrained(model_path)

from peft import LoraConfig

# LoRA configuration: rank-8 adapters on the attention q/v projections.
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "v_proj"],
    bias="none",
    lora_dropout=0.05
)

from peft import get_peft_model

model = get_peft_model(base_model, lora_config)
model.to("cuda")

# Data
from modelscope.msdatasets import MsDataset
from datasets import Dataset

dataset: Dataset = MsDataset.load("llamafactory/alpaca_zh", subset_name="default")['train']  # type:ignore

# Flatten each record into a single "Instruction/Input/Output" training text.
dataset = dataset.map(
    lambda val: {
        "text": f"Instruction: {val['instruction']}\n"
                + f"Input: {val['input'] if val['input'] else ''}\n"
                + f"Output: {val['output']}"
    }
)

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(model_path)

# Tokenize with fixed-length padding so the default collator can batch directly.
dataset = dataset.map(
    lambda val: tokenizer(
        val['text'],
        truncation=True,
        padding="max_length",
        max_length=48
    ),
    batched=True,
    remove_columns=["text"]
)

# For causal LM training, the labels are a copy of the input ids.
dataset = dataset.map(
    lambda val: {"labels": val['input_ids'].copy()},
    batched=True
)

# 80/20 train/eval split.
train_len = int(0.8 * len(dataset))
train_dataset = dataset.select(range(train_len))
eval_dataset = dataset.select(range(train_len, len(dataset)))

# Training
from transformers import TrainingArguments

train_arg = TrainingArguments(
    eval_strategy="epoch",
    save_strategy="epoch",
    num_train_epochs=1,
    output_dir="./output",
    load_best_model_at_end=True,
    per_device_eval_batch_size=1,  # `per_gpu_eval_batch_size` is deprecated
)

from transformers import Trainer

trainer = Trainer(
    args=train_arg,
    model=model,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset
)
trainer.train()
trainer.save_model("./final_model1")
```

Training screenshot:
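One caveat about the labels step above: because the labels are a direct copy of `input_ids`, the loss is also computed on the padding tokens that `padding="max_length"` inserts. A common refinement, not in the original code, is to replace pad positions in the labels with -100, the ignore index of the Hugging Face causal-LM loss. A minimal sketch, assuming it replaces the labels `map` above:

```python
# Optional refinement (assumption, not in the original): mask padding
# positions in the labels with -100 so the cross-entropy loss ignores them.
def mask_pad_labels(batch):
    pad_id = tokenizer.pad_token_id
    batch["labels"] = [
        [tok if tok != pad_id else -100 for tok in ids]
        for ids in batch["input_ids"]
    ]
    return batch

dataset = dataset.map(mask_pad_labels, batched=True)
```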
Test code after training:
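The test code itself is not included in this excerpt. Below is a minimal sketch of what it could look like, assuming the adapter saved to `./final_model1` above and the same base-model path; the prompt text is purely illustrative:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Reload the base model and attach the trained LoRA adapter
# (sketch under the assumptions stated above).
model_path = 'D://qwen2/dir'  # same local base-model path as during training
base_model = AutoModelForCausalLM.from_pretrained(model_path)
model = PeftModel.from_pretrained(base_model, "./final_model1")
model.to("cuda")
model.eval()

tokenizer = AutoTokenizer.from_pretrained(model_path)

# Prompt in the same "Instruction/Input/Output" format used for training;
# the instruction below is a hypothetical example.
prompt = "Instruction: 保持健康的三个提示。\nInput: \nOutput: "
inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```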