Commit · 0805af2
1 Parent(s): 2c50a32
add logger
app.py CHANGED
@@ -9,17 +9,19 @@ import gradio as gr
 import queue
 import time
 import shutil
+from gradio_log import Log
+import logging


 # Global logging
-
-
+log = logging.getLogger("space_convert")
+log.setLevel(logging.INFO)
+log.addHandler(logging.StreamHandler())
+log.addHandler(logging.FileHandler("convert.log"))

 def log(msg):
     """Append and print a log message."""
-
-    current_logs.append(msg)
-    return "\n".join(current_logs)
+    log.info(msg)

 def timeit(func):
     def wrapper(*args, **kwargs):
@@ -216,20 +218,22 @@ tags:
 - text-generation
 - peft{quantization}
 library_name: transformers
-base_model: {base_model_name}
+base_model: {base_model_name}
 widget:
 - messages:
   - role: user
     content: What is your favorite condiment?
 license: other
+datasets:
+- {lora_model_name}
 ---
 # Model

 {repo_name}

 ## Details:
-- base_model: {base_model_name}
-- lora_model: {lora_model_name}
+- base_model: [{base_model_name}](https://huggingface.co/{base_model_name})
+- lora_model: [{lora_model_name}](https://huggingface.co/{lora_model_name})
 - quant_methods: {quant_methods}
 - created_at: {created_at}
 - created_by: [Steven10429/apply_lora_and_quantize](https://github.com/Steven10429/apply_lora_and_quantize)
@@ -258,8 +262,9 @@ def process_model(base_model_name, lora_model_name, repo_name, quant_methods, hf
     6. Finally, wait for all Futures to complete, then return the logs.
     """
     try:
-
-
+        if hf_token.strip().lower() == "auto":
+            hf_token = os.getenv("HF_TOKEN")
+        elif hf_token.startswith("hf_"):
             os.environ["HF_TOKEN"] = hf_token
         login(hf_token)
         api = HfApi(token=hf_token)
@@ -281,7 +286,7 @@ def process_model(base_model_name, lora_model_name, repo_name, quant_methods, hf


         # Quantize the model
-        for quant_method in quant_methods
+        for quant_method in quant_methods:
             quantize(output_dir, repo_name, quant_method=quant_method)

         create_readme(repo_name, base_model_name, lora_model_name, quant_methods)
@@ -300,12 +305,11 @@ def process_model(base_model_name, lora_model_name, repo_name, quant_methods, hf
         shutil.rmtree(model_path)
         log("Removed model from local")

-        return "\n".join(current_logs)
     except Exception as e:
         error_message = f"Error during processing: {e}"
         log(error_message)
         raise e
-
+

 @timeit
 def create_ui():
@@ -331,32 +335,25 @@ def create_ui():
             repo_name = gr.Textbox(
                 label="Hugging Face Repository Name",
                 placeholder="Enter the repository name to create",
-                value="Auto"
+                value="Auto",
+                type="password"
             )
             quant_method = gr.CheckboxGroup(
                 choices=["Q2_K", "Q4_K", "IQ4_NL", "Q5_K_M", "Q6_K", "Q8_0"],
-                value=["
+                value=["Q4_K", "Q8_0"],
                 label="Quantization Method"
             )
             hf_token = gr.Textbox(
                 label="Hugging Face Token",
                 placeholder="Enter your Hugging Face Token",
-                value=
-                type="password"
+                value="Auto"
             )
             convert_btn = gr.Button("Start Conversion", variant="primary")
         with gr.Column():
-
-                label="Logs",
-                placeholder="Processing logs will appear here...",
-                interactive=False,
-                autoscroll=True,
-                lines=20
-            )
+            Log("convert.log", dark=True, xterm_font_size=12)
             convert_btn.click(
                 fn=process_model,
                 inputs=[base_model, lora_model, repo_name, quant_method, hf_token],
-                outputs=output
             )
     return app

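To see how the pieces added in this commit fit together, here is a minimal, self-contained sketch (not part of app.py) of the same pattern: a logging logger that writes to stderr and to convert.log, and a gradio_log Log component that tails that file in the UI. The logger is bound to the name logger here purely for illustration; in the commit itself the module-level logger and the log(msg) helper share the name log.

import logging
import time

import gradio as gr
from gradio_log import Log  # same component the commit imports

# Module-level logger writing to stderr and to convert.log
# ("logger" is an illustrative rename to avoid clashing with a log() helper).
logger = logging.getLogger("space_convert")
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
logger.addHandler(logging.FileHandler("convert.log"))


def fake_job():
    """Stand-in for process_model: emit a few log lines over a few seconds."""
    for step in range(5):
        logger.info("step %d done", step)
        time.sleep(1)


with gr.Blocks() as app:
    start = gr.Button("Start")
    # Tails convert.log in the browser, same call as in create_ui().
    Log("convert.log", dark=True, xterm_font_size=12)
    start.click(fn=fake_job)

app.launch()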
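The token handling added to process_model can also be read on its own: a literal "Auto" falls back to the HF_TOKEN environment variable, a token starting with "hf_" is exported to the environment, and anything else is passed to login() unchanged. A sketch of that rule as a standalone helper (the name resolve_hf_token is illustrative, not from the commit):

import os


def resolve_hf_token(hf_token: str) -> str:
    """Mirror the commit's "Auto"/"hf_..." handling (illustrative helper)."""
    if hf_token.strip().lower() == "auto":
        # UI default "Auto": reuse the token already configured in the environment.
        hf_token = os.getenv("HF_TOKEN")
    elif hf_token.startswith("hf_"):
        # Explicit user token: export it so downstream tools see it too.
        os.environ["HF_TOKEN"] = hf_token
    return hf_token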