Vladyslav Humennyy committed
Commit 9caa247 · 1 Parent(s): c8d415a

Get rid of try except to see error

Files changed (1)
  1. app.py +45 -45
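The removed try/except is the point of this commit: a broad `except Exception` that only prints the message and falls back to `None` hides the full traceback, so disabling it lets the real error surface in the logs. A minimal sketch of the idea, with a hypothetical `run_processor_debug` wrapper (not part of app.py):

import traceback

def run_processor_debug(fn, *args, **kwargs):
    # Option 1 (what this commit does, in spirit): call with no try/except,
    # so the original traceback propagates to the caller and the logs.
    # return fn(*args, **kwargs)

    # Option 2: keep a handler but print the full traceback and re-raise,
    # instead of swallowing the error and returning a fallback value.
    try:
        return fn(*args, **kwargs)
    except Exception:
        traceback.print_exc()
        raise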
app.py CHANGED
@@ -197,51 +197,51 @@ def bot(
 
     # Use processor if images are present
    if processor is not None and has_images:
-        try:
-            processor_history = []
-            for msg in history:
-                role = msg.get("role", "user")
-                content = msg.get("content")
-
-                if isinstance(content, str):
-                    processor_history.append({"role": role, "content": content})
-                elif isinstance(content, tuple):
-                    formatted_content = []
-                    tmp_path, _ = content
-                    pil_image = Image.open(tmp_path)
-
-                    if pil_image is not None:
-                        # formatted_content.append({"type": "image", "image": pil_image})
-                        buffered = io.BytesIO()
-                        pil_image.save(buffered, format="JPEG")
-                        img_base64 = base64.b64encode(buffered.getvalue()).decode()
-                        image_input = {
-                            "type": "image_url",
-                            "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
-                        }
-                        if processor_history[-1].get('role') == 'user':
-                            if isinstance(processor_history[-1].get('content'), str):
-                                previous_message = processor_history[-1].get('content')
-                                formatted_content.append({"type": "text", "text": previous_message})
-                                formatted_content.append(image_input)
-                                processor_history[-1]['content'] = formatted_content
-                            elif isinstance(processor_history[-1].get('content'), list):
-                                processor_history[-1]['content'].append(image_input)
-                        else:
-                            if formatted_content:
-                                processor_history.append({"role": role, "content": formatted_content})
-
-            print(processor_history)
-
-            model_inputs = processor(
-                messages=processor_history,
-                return_tensors="pt",
-                add_generation_prompt=True,
-            ).to(model.device)
-            print("Using processor for vision input")
-        except Exception as exc:
-            print(f"Processor failed: {exc}")
-            model_inputs = None
+        # try:
+        processor_history = []
+        for msg in history:
+            role = msg.get("role", "user")
+            content = msg.get("content")
+
+            if isinstance(content, str):
+                processor_history.append({"role": role, "content": content})
+            elif isinstance(content, tuple):
+                formatted_content = []
+                tmp_path, _ = content
+                pil_image = Image.open(tmp_path)
+
+                if pil_image is not None:
+                    # formatted_content.append({"type": "image", "image": pil_image})
+                    buffered = io.BytesIO()
+                    pil_image.save(buffered, format="JPEG")
+                    img_base64 = base64.b64encode(buffered.getvalue()).decode()
+                    image_input = {
+                        "type": "image_url",
+                        "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
+                    }
+                    if processor_history[-1].get('role') == 'user':
+                        if isinstance(processor_history[-1].get('content'), str):
+                            previous_message = processor_history[-1].get('content')
+                            formatted_content.append({"type": "text", "text": previous_message})
+                            formatted_content.append(image_input)
+                            processor_history[-1]['content'] = formatted_content
+                        elif isinstance(processor_history[-1].get('content'), list):
+                            processor_history[-1]['content'].append(image_input)
+                    else:
+                        if formatted_content:
+                            processor_history.append({"role": role, "content": formatted_content})
+
+        print(processor_history)
+
+        model_inputs = processor(
+            messages=processor_history,
+            return_tensors="pt",
+            add_generation_prompt=True,
+        ).to(model.device)
+        print("Using processor for vision input")
+        # except Exception as exc:
+        #     print(f"Processor failed: {exc}")
+        #     model_inputs = None
 
     # Fallback to tokenizer for text-only
     if model_inputs is None:
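For quick experimentation outside the app, here is a minimal standalone sketch of the image formatting this hunk performs: open the uploaded file, re-encode it as JPEG, and wrap it in an OpenAI-style image_url content part. The helper name make_image_message, the example path, and the convert("RGB") safeguard are assumptions added here, not part of app.py.

import base64
import io

from PIL import Image


def make_image_message(image_path, text=None):
    """Build a chat message whose content mixes optional text with a
    base64-encoded JPEG data URL, mirroring the formatting in app.py."""
    pil_image = Image.open(image_path)

    # convert("RGB") is an extra safeguard here so RGBA/PNG inputs can be
    # saved as JPEG; app.py saves the opened image directly.
    buffered = io.BytesIO()
    pil_image.convert("RGB").save(buffered, format="JPEG")
    img_base64 = base64.b64encode(buffered.getvalue()).decode()

    content = []
    if text:
        content.append({"type": "text", "text": text})
    content.append({
        "type": "image_url",
        "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
    })
    return {"role": "user", "content": content}


if __name__ == "__main__":
    # Hypothetical usage; the path and prompt are placeholders.
    message = make_image_message("example.jpg", "Describe this image.")
    print(message["content"][-1]["image_url"]["url"][:60])

Feeding a list of such messages to the processor should reproduce the processor_history structure that the commit now prints for inspection.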