onkar-waghmode committed on
Commit
17ca74c
·
1 Parent(s): 7048a4b
Files changed (1) hide show
  1. app.py +24 -22
app.py CHANGED
@@ -45,27 +45,6 @@ print("Models loaded successfully!")
45
 
46
 
47
 
48
- # ============================================================================
49
- # AI Detection
50
- # ============================================================================
51
- def predict_ai_content(text):
52
- if not text or not text.strip():
53
- return "No input provided", 0.0
54
-
55
- try:
56
- result = ai_detector_pipe(text)
57
- if isinstance(result, list) and len(result) > 0:
58
- res = result[0]
59
- ai_content_label = res.get('label', 'Unknown')
60
- ai_content_score = round(float(res.get('score', 0)) * 100, 2)
61
- return ai_content_label, ai_content_score
62
- else:
63
- return "Invalid response", 0.0
64
- except Exception as e:
65
- print(f"Error in prediction: {e}")
66
- return "Error", 0.0
67
-
68
-
69
  # ============================================================================
70
  # STAGE 1: PARAPHRASING WITH T5 MODEL
71
  # ============================================================================
@@ -358,11 +337,34 @@ def calculate_similarity(text1: str, text2: str) -> float:
358
  similarity = float(np.dot(embeddings[0], embeddings[1]) / (
359
  np.linalg.norm(embeddings[0]) * np.linalg.norm(embeddings[1])
360
  ))
 
361
  return similarity
362
  except Exception as e:
363
  logger.error(f"Similarity calculation failed: {e}")
364
  return 0.0
365
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
366
  # ============================================================================
367
  # MAIN HUMANIZER FUNCTION
368
  # ============================================================================
@@ -487,7 +489,7 @@ def create_gradio_interface():
487
  gr.Markdown("### Semantic Similarity & Status")
488
 
489
  with gr.Row():
490
- similarity_output = gr.Number(label="Similarity Score", precision=4)
491
  status_output = gr.Textbox(label="Status",interactive=False,lines=2, max_lines=10)
492
 
493
  with gr.Row():
 
45
 
46
 
47
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
  # ============================================================================
49
  # STAGE 1: PARAPHRASING WITH T5 MODEL
50
  # ============================================================================
 
337
  similarity = float(np.dot(embeddings[0], embeddings[1]) / (
338
  np.linalg.norm(embeddings[0]) * np.linalg.norm(embeddings[1])
339
  ))
340
+ similarity = round(similarity*100, 2)
341
  return similarity
342
  except Exception as e:
343
  logger.error(f"Similarity calculation failed: {e}")
344
  return 0.0
345
 
346
+
347
+ # ============================================================================
348
+ # AI Detection
349
+ # ============================================================================
350
+ def predict_ai_content(text):
351
+ if not text or not text.strip():
352
+ return "No input provided", 0.0
353
+
354
+ try:
355
+ result = ai_detector_pipe(text)
356
+ if isinstance(result, list) and len(result) > 0:
357
+ res = result[0]
358
+ ai_content_label = res.get('label', 'Unknown')
359
+ ai_content_score = round(float(res.get('score', 0)) * 100, 2)
360
+ return ai_content_label, ai_content_score
361
+ else:
362
+ return "Invalid response", 0.0
363
+ except Exception as e:
364
+ print(f"Error in prediction: {e}")
365
+ return "Error", 0.0
366
+
367
+
368
  # ============================================================================
369
  # MAIN HUMANIZER FUNCTION
370
  # ============================================================================
 
489
  gr.Markdown("### Semantic Similarity & Status")
490
 
491
  with gr.Row():
492
+ similarity_output = gr.Number(label="Content Similarity (%)", precision=2)
493
  status_output = gr.Textbox(label="Status",interactive=False,lines=2, max_lines=10)
494
 
495
  with gr.Row():