thinkingnew committed
Commit b22328e · Parent(s): 678c4c4
Files changed (1):
  1. app.py (+6 -29)
app.py CHANGED
@@ -13,7 +13,7 @@ adapter_path = "thinkingnew/llama_invs_adapter"
 # Check if GPU is available
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-# Load base model with device_map="auto" to handle GPUs automatically
+# Load base model with `device_map="auto"` to handle GPUs automatically
 base_model = AutoModelForCausalLM.from_pretrained(
     base_model_path, torch_dtype=torch.float16, device_map="auto"
 )
@@ -21,46 +21,23 @@ base_model = AutoModelForCausalLM.from_pretrained(
 # Load adapter and ensure it is on the correct device
 model = PeftModel.from_pretrained(base_model, adapter_path).to(device)
 
-# Load tokenizer and ensure padding token is set
+# Load tokenizer
 tokenizer = AutoTokenizer.from_pretrained(base_model_path)
-tokenizer.pad_token = tokenizer.eos_token  # Avoids padding issues
 
 # Define request model for validation
 class GenerateRequest(BaseModel):
     prompt: str
 
-# **Use model.generate() instead of pipeline()**
+# **Use `model.generate()` instead of `pipeline()`**
 def generate_text_from_model(prompt: str):
     try:
-        input_data = tokenizer(
-            f"<s>[INST] {prompt} [/INST]",
-            return_tensors="pt",
-            padding=True,
-            truncation=True
-        )
-        input_ids = input_data.input_ids.to(device)
-        attention_mask = input_data.attention_mask.to(device)
-
-        # Generate output
-        output_ids = model.generate(
-            input_ids,
-            max_length=512,
-            pad_token_id=tokenizer.eos_token_id,  # Explicitly setting pad_token_id
-            attention_mask=attention_mask
-        )
-
+        input_ids = tokenizer(f"<s>[INST] {prompt} [/INST]", return_tensors="pt").input_ids.to(device)
+        output_ids = model.generate(input_ids, max_length=512)
         generated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
-
-        # Extract only the assistant's response
-        response_text = generated_text.split("<|assistant|>\n")[-1].strip()
-        return response_text
-    except torch.cuda.OutOfMemoryError:
-        torch.cuda.empty_cache()
-        raise HTTPException(status_code=500, detail="CUDA Out of Memory. Try using a smaller model or lowering max_length.")
+        return generated_text
     except Exception as e:
         raise HTTPException(status_code=500, detail=str(e))
 
-
 # Root endpoint for testing
 @app.get("/")
 async def root():
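
For context on the "Use `model.generate()` instead of `pipeline()`" comment: the pipeline-based version being replaced is not part of this commit, so the following is only a rough sketch of what such a helper typically looks like with the objects loaded above (the exact arguments used before this change are an assumption).

from transformers import pipeline

# Sketch (assumption): pipeline-based equivalent of generate_text_from_model.
# `model` and `tokenizer` are the PEFT-wrapped model and tokenizer loaded above.
text_gen = pipeline("text-generation", model=model, tokenizer=tokenizer)

def generate_text_with_pipeline(prompt: str) -> str:
    # pipeline() returns a list of dicts with a "generated_text" key
    outputs = text_gen(f"<s>[INST] {prompt} [/INST]", max_length=512)
    return outputs[0]["generated_text"]

Calling `model.generate()` directly, as the new code does, keeps explicit control over the input tensors and device placement instead of delegating both to the pipeline wrapper.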
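
The hunk shows `GenerateRequest` and the root endpoint but not the endpoint that actually serves generations. Presumably `generate_text_from_model` is exposed through a POST route elsewhere in app.py; a minimal sketch of that wiring, assuming a hypothetical `/generate` path (not shown in this diff):

# Hypothetical wiring (assumption, not part of this commit): a POST endpoint
# that accepts GenerateRequest and returns the generated text.
@app.post("/generate")
async def generate(request: GenerateRequest):
    return {"generated_text": generate_text_from_model(request.prompt)}

With that route in place, the service could be exercised with a request such as
curl -X POST http://localhost:7860/generate -H "Content-Type: application/json" -d '{"prompt": "Hello"}'
where port 7860 (the usual Hugging Face Spaces default) is likewise an assumption.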