mel-seto shftan committed on
Commit
5c74b9f
·
1 Parent(s): 6e635be

Change model from bloom to bloomz and change prompt (#1)

Browse files

- Update app.py (7bc648a82986fa4d32ac01272acbf13e465bcdb5)


Co-authored-by: Sarah <[email protected]>

Files changed (1) hide show
  1. app.py +9 -50
app.py CHANGED
@@ -1,55 +1,14 @@
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
 
4
- MODEL_ID = "bigscience/bloom-560m"
5
-
6
- # load once globally
7
- tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
8
- model = AutoModelForCausalLM.from_pretrained(
9
- MODEL_ID,
10
- device_map="auto", # GPU if available, otherwise CPU
11
- trust_remote_code=True
12
- )
13
-
14
- pipe = pipeline(
15
- "text-generation",
16
- model=model,
17
- tokenizer=tokenizer,
18
- max_new_tokens=200,
19
- temperature=0.7
20
- )
21
-
22
- def generate_idiom(situation: str):
23
- prompt = f"""You are a wise assistant. Given a situation, respond with exactly:
24
- 1. A single Chinese idiom (成语).
25
- 2. Its pinyin.
26
- 3. A short English explanation.
27
-
28
- Format:
29
- Idiom
30
- Pinyin
31
- Explanation
32
-
33
- Situation: {situation}
34
- Answer:
35
- """
36
- response = pipe(prompt)[0]["generated_text"]
37
- clean_response = response.split("Answer:")[-1].strip()
38
-
39
- # Try to split into lines
40
- lines = [line.strip() for line in clean_response.split("\n") if line.strip()]
41
- if len(lines) >= 3:
42
- idiom = lines[0]
43
- pinyin = lines[1]
44
- meaning = " ".join(lines[2:])
45
- explanation = f"{pinyin}\n\n{meaning}"
46
- else:
47
- # fallback if formatting is off
48
- idiom = clean_response
49
- explanation = ""
50
-
51
- return idiom, explanation
52
-
53
 
54
  with gr.Blocks(css="""
55
  .idiom-output {
@@ -100,4 +59,4 @@ with gr.Blocks(css="""
100
 
101
 
102
  if __name__ == "__main__":
103
- demo.launch()
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
 
4
# Shared text-generation pipeline; built once at import time so every
# request reuses the loaded bloomz-560m weights instead of reloading them.
pipe = pipeline(task="text-generation", model="bigscience/bloomz-560m")


def generate_idiom(situation):
    """Ask the model for a Chinese idiom that fits *situation*.

    Returns a 2-tuple for the two Gradio output fields:
      - idiom: the model's generated completion (prompt echo stripped)
      - details: "pinyin\n\nmeaning" — currently empty placeholders, since
        this simplified prompt no longer asks the model for them.
    """
    prompt = f"Give me a Chinese idiom for this: {situation}."
    # pipeline() returns [{"generated_text": prompt + completion}].
    # The raw list/dict is not display-ready (the UI would show its repr),
    # so extract the text and drop the echoed prompt prefix.
    raw = pipe(prompt)
    print(raw)  # kept: debug visibility of the full model output in logs
    generated = raw[0]["generated_text"]
    idiom = generated[len(prompt):].strip() or generated.strip()
    pinyin = ""
    meaning = ""
    return idiom, f"{pinyin}\n\n{meaning}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
  with gr.Blocks(css="""
14
  .idiom-output {
 
59
 
60
 
61
  if __name__ == "__main__":
62
+ demo.launch()