From c001e3981a1f57387a6ef065671905c1fe7874a7 Mon Sep 17 00:00:00 2001
From: Julien Breton <julien.breton@moncitron.fr>
Date: Thu, 15 Feb 2024 09:48:20 +0900
Subject: [PATCH] fix issues when generating with Miqu-1-70b

Pass device_map and the 4-bit loading arguments to PeftModel.from_pretrained
when reloading the adapter in generate(), mirroring how the base model is
reloaded just above, and comment out the fine_tune() call so the script only
runs generation against the already fine-tuned adapter.
---
 modules/llm/Miqu-1-70b/Miqu-1-70b_fine_tune.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/modules/llm/Miqu-1-70b/Miqu-1-70b_fine_tune.py b/modules/llm/Miqu-1-70b/Miqu-1-70b_fine_tune.py
index 56f86c9..9c3d283 100644
--- a/modules/llm/Miqu-1-70b/Miqu-1-70b_fine_tune.py
+++ b/modules/llm/Miqu-1-70b/Miqu-1-70b_fine_tune.py
@@ -99,7 +99,14 @@ def generate(base_model, new_model):
         load_in_4bit=True,
         attn_implementation="flash_attention_2"
     )
-    model = PeftModel.from_pretrained(base_model_reload, new_model)
+    model = PeftModel.from_pretrained(
+        base_model_reload,
+        new_model,
+        device_map="auto",
+        load_in_8bit=False,
+        load_in_4bit=True,
+        attn_implementation="flash_attention_2"
+    )
     model = model.merge_and_unload()
 
     tokenizer = transformers.AutoTokenizer.from_pretrained(base_model)
@@ -151,7 +158,7 @@ def generate(base_model, new_model):
 base_model = "../../../models/Miqu-1-70b"
 new_model = "../../../models/Fine-tuned_Miqu-1-70b"
 
-fine_tune(base_model, new_model)
+#fine_tune(base_model, new_model)
 generate(base_model, new_model)
 
 print("========== Program finished ==========")
-- 
GitLab
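
A minimal sketch, outside the patch itself, of the reload-and-merge pattern the
first hunk changes. Paths and quantization settings are copied from the patched
script; the extra load_in_8bit/load_in_4bit/attn_implementation kwargs the patch
passes to PeftModel.from_pretrained are forwarded to peft's adapter loading and
their effect depends on the installed peft version, so the sketch below only
passes device_map, which peft accepts for dispatching the adapter. Whether
merge_and_unload() can merge into a bitsandbytes-quantized base also depends on
the peft version in use.

import transformers
from peft import PeftModel

base_model = "../../../models/Miqu-1-70b"             # paths from the patched script
new_model = "../../../models/Fine-tuned_Miqu-1-70b"

# Reload the base model in 4-bit, as the unchanged context lines of the hunk do.
base_model_reload = transformers.AutoModelForCausalLM.from_pretrained(
    base_model,
    device_map="auto",
    load_in_4bit=True,
    attn_implementation="flash_attention_2",
)

# Attach the fine-tuned adapter; device_map lets peft place the adapter weights
# alongside the quantized base model, then merge them back into the base weights.
model = PeftModel.from_pretrained(base_model_reload, new_model, device_map="auto")
model = model.merge_and_unload()

tokenizer = transformers.AutoTokenizer.from_pretrained(base_model)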