zRzRzRzRzRzRzR committed
Commit 914732f · 1 Parent(s): 3c9fd16
Files changed (2)
  1. app.py +5 -5
  2. requirements.txt +4 -4
app.py CHANGED
@@ -5,15 +5,15 @@ import subprocess
 import tempfile
 import threading
 from pathlib import Path
+import spaces
 import fitz
 import gradio as gr
 import time
 import html
 import torch
 from transformers import AutoProcessor, Glm4vForConditionalGeneration, TextIteratorStreamer
-import spaces
 
-MODEL_PATH = "THUDM/GLM-4.1V-9B-Thinking"
+MODEL_PATH = "zai-org/GLM-4.1V-9B-Thinking"
 stop_generation = False
 
 processor = AutoProcessor.from_pretrained(MODEL_PATH, use_fast=True)
@@ -254,11 +254,11 @@ demo = gr.Blocks(title="GLM-4.1V-9B-Thinking", theme=gr.themes.Soft())
 with demo:
     gr.Markdown(
         "<div style='text-align:center;font-size:32px;font-weight:bold;margin-bottom:20px;'>GLM-4.1V-9B-Thinking</div>"
-        "<div style='text-align:center;'><a href='https://huggingface.co/THUDM/GLM-4.1V-9B-Thinking'>Model Hub</a> | "
-        "<a href='https://github.com/THUDM/GLM-4.1V-Thinking'>Github</a> |"
+        "<div style='text-align:center;'><a href='https://huggingface.co/zai-org/GLM-4.1V-9B-Thinking'>Model Hub</a> | "
+        "<a href='https://github.com/zai-org/GLM-V'>Github</a> |"
         "<a href='https://arxiv.org/abs/2507.01006'>Paper</a> |"
         "<a href='https://www.bigmodel.cn/dev/api/visual-reasoning-model/GLM-4.1V-Thinking'>API</a> </div>"
-        "<div style='text-align:center;color:gray;font-size:14px;margin-top:10px;'>This demo runs on local GPU for faster experience. For the API version, visit <a href='https://huggingface.co/spaces/THUDM/GLM-4.1V-9B-Thinking-API-Demo' target='_blank'>this Space</a>.</div>"
+        "<div style='text-align:center;color:gray;font-size:14px;margin-top:10px;'>This demo runs on local GPU for faster experience. For the API version, visit <a href='https://huggingface.co/spaces/zai-org/GLM-4.1V-9B-Thinking-API-Demo' target='_blank'>this Space</a>.</div>"
 
     )
     raw_history = gr.State([])
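For reference, the relocated spaces import and the renamed MODEL_PATH are consumed by model-loading and streaming code that this diff does not touch. Below is a minimal sketch of how these imports are typically wired together on a ZeroGPU Space; only the import positions and the MODEL_PATH string come from the diff, while the loading arguments, the chat-template call, and the stream_reply helper are illustrative assumptions, not lines from app.py:

import threading

import spaces
import torch
from transformers import AutoProcessor, Glm4vForConditionalGeneration, TextIteratorStreamer

MODEL_PATH = "zai-org/GLM-4.1V-9B-Thinking"

processor = AutoProcessor.from_pretrained(MODEL_PATH, use_fast=True)
model = Glm4vForConditionalGeneration.from_pretrained(
    MODEL_PATH,
    torch_dtype=torch.bfloat16,  # assumption: the diff does not show the loading dtype
    device_map="auto",
)

@spaces.GPU  # ZeroGPU: a GPU is attached only for the duration of this call
def stream_reply(prompt):
    # Illustrative helper, not taken from app.py: build inputs with the chat template,
    # run generation on a worker thread, and yield text pieces as the streamer emits them.
    inputs = processor.apply_chat_template(
        [{"role": "user", "content": [{"type": "text", "text": prompt}]}],
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)
    streamer = TextIteratorStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)
    thread = threading.Thread(
        target=model.generate,
        kwargs=dict(**inputs, max_new_tokens=512, streamer=streamer),
    )
    thread.start()
    for piece in streamer:
        yield piece

Running model.generate on a background thread is what makes TextIteratorStreamer useful here: the calling thread is free to iterate over the streamer and push partial output to the Gradio UI as it arrives.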
requirements.txt CHANGED
@@ -1,7 +1,7 @@
-git+https://github.com/huggingface/transformers.git@main
-accelerate>=1.6.0
-gradio==5.25.0
-spaces>=0.37.1
+transformers>=4.57.1
+accelerate>=1.11.0
+gradio>=5.49.1
+spaces>=0.42.1
 PyMuPDF>=1.26.1
 torchvision==0.20.1
 torch==2.5.1
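With the Space now depending on released packages instead of a transformers build from git main, the version floors above can be sanity-checked at startup. A small optional check, assuming the packaging module is available in the environment (it ships alongside pip); the MINIMUMS mapping simply mirrors requirements.txt and is not part of the Space itself:

from importlib.metadata import version

from packaging.version import Version

# Version floors mirrored from requirements.txt; this check is illustrative only.
MINIMUMS = {
    "transformers": "4.57.1",
    "accelerate": "1.11.0",
    "gradio": "5.49.1",
    "spaces": "0.42.1",
}

for package, minimum in MINIMUMS.items():
    installed = version(package)
    if Version(installed) < Version(minimum):
        raise RuntimeError(f"{package} {installed} is older than the required {minimum}")
    print(f"{package}=={installed} (>= {minimum})")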