Fine-tuning Meta Llama 3 8B Instruct on Chinese data with LLaMA-Factory
The training arguments below are in the format saved by the LLaMA-Factory web UI (LlamaBoard). The first run used the following settings; a command-line sketch of an equivalent run follows this block.

"top.model_name": "LLaMA3-8B-Chat",
"top.finetuning_type": "lora",
"top.adapter_path": [],
"top.quantization_bit": "none",
"top.template": "llama3",
"top.rope_scaling": "none",
"top.booster": "none",
"train.training_stage": "Supervised Fine-Tuning",
"train.dataset_dir": "data",
"train.dataset": [
  "alpaca_zh",
  "alpaca_gpt4_zh",
  "guanaco",
  "oaast_sft_zh",
  "wikipedia_zh"
],
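For reference, here is a minimal sketch of launching an equivalent run from the command line instead of the web UI, assuming LLaMA-Factory is installed and exposes the `llamafactory-cli` entry point. The "LLaMA3-8B-Chat" entry in the web UI is assumed to point at `meta-llama/Meta-Llama-3-8B-Instruct`; the batch size, learning rate, epochs, cutoff length and output directory are illustrative placeholders rather than values taken from this card, and exact flag names can differ between LLaMA-Factory versions.

```python
# Hedged sketch: launch the same LoRA SFT run via the LLaMA-Factory CLI.
# Hyperparameters and output_dir below are illustrative, not from this card.
import subprocess

cmd = [
    "llamafactory-cli", "train",
    "--stage", "sft",
    "--do_train", "true",
    "--model_name_or_path", "meta-llama/Meta-Llama-3-8B-Instruct",  # assumed base model
    "--finetuning_type", "lora",
    "--template", "llama3",
    "--dataset_dir", "data",
    "--dataset", "alpaca_zh,alpaca_gpt4_zh,guanaco,oaast_sft_zh,wikipedia_zh",
    "--output_dir", "saves/llama3-8b-chinese-lora",  # hypothetical path
    "--per_device_train_batch_size", "2",
    "--gradient_accumulation_steps", "4",
    "--learning_rate", "5e-5",
    "--num_train_epochs", "3.0",
    "--cutoff_len", "1024",
    "--bf16", "true",
]
subprocess.run(cmd, check=True)
```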
A second configuration adds the nsfc_zh dataset to the training mix; a loading and inference sketch for the resulting LoRA adapter follows this block.

"top.model_name": "LLaMA3-8B-Chat",
"top.finetuning_type": "lora",
"top.adapter_path": [],
"top.quantization_bit": "none",
"top.template": "llama3",
"top.rope_scaling": "none",
"top.booster": "none",
"train.training_stage": "Supervised Fine-Tuning",
"train.dataset_dir": "data",
"train.dataset": [
  "alpaca_zh",
  "alpaca_gpt4_zh",
  "guanaco",
  "nsfc_zh",
  "oaast_sft_zh",
  "wikipedia_zh"
],
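After training, the LoRA adapter can be loaded on top of the base Instruct model for inference. This is a minimal sketch assuming `transformers` and `peft` are installed; the adapter path is a hypothetical placeholder for wherever the training run saved its output.

```python
# Minimal inference sketch: load the base Llama 3 Instruct model and attach
# the LoRA adapter produced by the training run above. adapter_path is a
# hypothetical placeholder; point it at the actual output directory.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "meta-llama/Meta-Llama-3-8B-Instruct"
adapter_path = "saves/llama3-8b-chinese-lora"  # hypothetical output directory

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(
    base_id, torch_dtype=torch.bfloat16, device_map="auto"
)
model = PeftModel.from_pretrained(model, adapter_path)
model.eval()

# The "llama3" template selected in the config matches the chat template
# bundled with the Instruct tokenizer, so apply_chat_template can be used.
messages = [{"role": "user", "content": "请用中文介绍一下你自己。"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

with torch.no_grad():
    output_ids = model.generate(input_ids, max_new_tokens=256)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```

If a standalone checkpoint is preferred, the adapter can also be merged into the base weights, for example with LLaMA-Factory's export command or peft's `merge_and_unload`.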