HusainNaqvijobs committed on
Commit 586f594 · verified · 1 Parent(s): e96bfe8
Files changed (1)
  1. app.py +6 -15
app.py CHANGED
@@ -3,34 +3,26 @@ from PIL import Image
 import torch
 import os
 
-# Your Hugging Face token for gated model access
-import os
-HF_TOKEN = os.getenv("HF_TOKEN") # Secure load from Space secret!
-
-# Lingshu-7B imports
-from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
+# Load Hugging Face token securely from Space Secrets
+HF_TOKEN = os.getenv("HF_TOKEN")
 
-# MedGemma imports
-from transformers import pipeline
+from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor, pipeline
 
-# Caching models and processors to avoid repeat loading
-lingshu_model, lingshu_processor = None, None
+lingshu_model = None
+lingshu_processor = None
 medgemma_pipe = None
 
-# Load Lingshu-7B
 def load_lingshu():
     global lingshu_model, lingshu_processor
     if lingshu_model is None or lingshu_processor is None:
         lingshu_model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
             "lingshu-medical-mllm/Lingshu-7B",
             torch_dtype=torch.bfloat16,
-            attn_implementation="flash_attention_2",
             device_map="auto"
         )
         lingshu_processor = AutoProcessor.from_pretrained("lingshu-medical-mllm/Lingshu-7B")
     return lingshu_model, lingshu_processor
 
-# Load MedGemma-27B-IT with token for gated access
 def load_medgemma():
     global medgemma_pipe
     if medgemma_pipe is None:
@@ -44,9 +36,8 @@ def load_medgemma():
     return medgemma_pipe
 
 def inference(image, question, selected_model):
-    # Check image and question validity
     if image is None or question is None or question.strip() == "":
-        return "Please upload a medical image and enter your question/prompt."
+        return "Please upload a medical image and enter your question or prompt."
     if selected_model == "Lingshu-7B":
         model, processor = load_lingshu()
         messages = [
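Note: the first hunk ends before the body of load_medgemma, so the token-passing part of the change is not visible above. A minimal sketch of what a lazy, cached load of a gated pipeline could look like, assuming an image-text-to-text task and the google/medgemma-27b-it repo id (both assumptions; the diff does not show them):

import os
import torch
from transformers import pipeline

HF_TOKEN = os.getenv("HF_TOKEN")  # read from the Space secret, never hard-coded

medgemma_pipe = None

def load_medgemma():
    # Load once and reuse on subsequent calls, mirroring load_lingshu above.
    global medgemma_pipe
    if medgemma_pipe is None:
        medgemma_pipe = pipeline(
            "image-text-to-text",            # assumed task; elided by the hunk
            model="google/medgemma-27b-it",  # assumed gated repo id
            torch_dtype=torch.bfloat16,
            device_map="auto",
            token=HF_TOKEN,                  # grants access to the gated repo
        )
    return medgemma_pipe

Passing token= (the successor to the deprecated use_auth_token=) is what authorizes the download of a gated repository; keeping the value in a Space secret keeps it out of the source.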
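Dropping attn_implementation="flash_attention_2" avoids a hard crash when the flash-attn package is not installed in the Space. A minimal sketch, not part of this commit, of keeping FlashAttention 2 as an opt-in with a fallback to PyTorch's built-in SDPA attention:

import importlib.util
import torch
from transformers import Qwen2_5_VLForConditionalGeneration

# Use FlashAttention 2 only if flash_attn is importable; otherwise fall back
# to "sdpa" (scaled-dot-product attention), which needs no extra dependency.
attn_impl = "flash_attention_2" if importlib.util.find_spec("flash_attn") else "sdpa"

model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "lingshu-medical-mllm/Lingshu-7B",
    torch_dtype=torch.bfloat16,
    attn_implementation=attn_impl,
    device_map="auto",
)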