Runtime error correction — note: this change removes the imports, `simulate_batches`, and `plot_timeline` while `run_simulation` still references them, so app.py will fail with `NameError` at runtime unless those definitions are restored.

#1
Files changed (1) hide show
  1. app.py +7 -97
app.py CHANGED
@@ -1,94 +1,5 @@
1
- import time
2
- import random
3
- import numpy as np
4
- import matplotlib.pyplot as plt
5
- import gradio as gr
6
-
7
# Apply a consistent visual theme to every figure the app produces.
plt.style.use('seaborn-v0_8-darkgrid')

# Seed both RNGs so repeated simulation runs are reproducible.
SEED = 42
random.seed(SEED)
np.random.seed(SEED)
13
-
14
def simulate_batches(num_workers=4,
                     batch_time=500,        # nominal ms per batch (before jitter)
                     network_latency=200,   # ms of communication overhead
                     mode='synchronous',
                     num_batches=10):
    """
    Simulate mini-batch scheduling under synchronous vs asynchronous
    update strategies.

    Parameters
    ----------
    num_workers : int
        Number of parallel workers.
    batch_time : float
        Nominal per-batch processing time in ms; the actual time is drawn
        uniformly from [0.8, 1.2] * batch_time.
    network_latency : float
        Synchronous mode adds this full latency once per batch round
        (gradient aggregation); asynchronous mode adds uniform jitter in
        [0, 0.3 * network_latency] per worker instead.
    mode : str
        'synchronous' (barrier + aggregation cost); any other value is
        treated as asynchronous.
    num_batches : int
        Number of batch rounds to simulate.

    Returns
    -------
    timelines : list[tuple]
        (worker_id, start_ms, end_ms, flag) tuples, flag 'active' or 'idle'.
    metrics : dict
        Keys: "epoch_time_ms", "idle_percent", "throughput" (batches/sec).
    """
    timelines = []                    # (worker_id, start, end, active/idle flag)
    current_time = [0] * num_workers  # per-worker simulated clock, in ms

    for _ in range(num_batches):
        for w in range(num_workers):
            # Each worker's batch time has +/-20% uniform jitter.
            proc_time = random.uniform(batch_time * 0.8, batch_time * 1.2)
            start = current_time[w]
            end = start + proc_time
            timelines.append((w, start, end, 'active'))
            current_time[w] = end

        if mode == 'synchronous':
            # Barrier: every worker waits for the slowest one; the wait is
            # recorded as an explicit 'idle' block.
            max_time = max(current_time)
            for w in range(num_workers):
                if current_time[w] < max_time:
                    timelines.append((w, current_time[w], max_time, 'idle'))
                    current_time[w] = max_time
            # Fixed sync overhead (e.g., gradient aggregation).
            current_time = [t + network_latency for t in current_time]
        else:
            # Asynchronous mode only pays random network jitter.
            current_time = [t + random.uniform(0, network_latency * 0.3)
                            for t in current_time]

    total_time = max(current_time)
    idle_time = sum(end - start
                    for (_, start, end, flag) in timelines if flag == 'idle')
    total_blocks = sum(end - start for (_, start, end, _) in timelines)

    # Guard the degenerate num_batches == 0 case: total_blocks and
    # total_time are both 0 there, and the unguarded divisions previously
    # raised ZeroDivisionError.
    idle_percent = (idle_time / total_blocks) * 100 if total_blocks else 0.0
    throughput = ((num_workers * num_batches * 1000) / total_time
                  if total_time else 0.0)

    metrics = {
        "epoch_time_ms": total_time,
        "idle_percent": round(idle_percent, 2),
        "throughput": round(throughput, 2),
    }

    return timelines, metrics
63
-
64
-
65
def plot_timeline(timelines, metrics, num_workers):
    """Render the per-worker schedule as a horizontal bar chart.

    Each (worker_id, start, end, flag) tuple becomes one bar: green for
    'active' processing, red for 'idle' barrier waits.  A text box with
    the headline metrics is overlaid beside the chart, and the figure is
    returned for display.
    """
    palette = {'active': '#4CAF50', 'idle': '#E74C3C'}
    fig, ax = plt.subplots(figsize=(10, 5))

    for worker_id, begin, finish, state in timelines:
        ax.barh(y=worker_id, width=finish - begin, left=begin,
                color=palette[state], edgecolor='black')

    ax.set_xlabel("Time (ms)")
    ax.set_ylabel("Worker ID")
    ax.set_title("Batch Scheduler Simulation")
    worker_ids = range(num_workers)
    ax.set_yticks(worker_ids)
    ax.set_yticklabels([f"W{i}" for i in worker_ids])
    # Put worker 0 at the top of the chart.
    ax.invert_yaxis()

    summary_lines = [
        f"Epoch Duration: {metrics['epoch_time_ms']:.2f} ms",
        f"Idle Time: {metrics['idle_percent']}%",
        f"Throughput: {metrics['throughput']} batches/sec",
    ]
    plt.figtext(0.72, 0.35, "\n".join(summary_lines), fontsize=10,
                bbox=dict(facecolor='white', alpha=0.7, edgecolor='gray'))
    plt.tight_layout()
    return fig
89
-
90
-
91
  def run_simulation(num_workers, batch_time, network_latency, mode, num_batches):
 
92
  timelines, metrics = simulate_batches(
93
  num_workers=int(num_workers),
94
  batch_time=float(batch_time),
@@ -97,15 +8,14 @@ def run_simulation(num_workers, batch_time, network_latency, mode, num_batches):
97
  num_batches=int(num_batches)
98
  )
99
  fig = plot_timeline(timelines, metrics, num_workers)
100
- summary = (
101
  f"Mode: {mode.capitalize()}\n"
102
  f"Epoch Time: {metrics['epoch_time_ms']:.2f} ms\n"
103
  f"Idle Time: {metrics['idle_percent']} %\n"
104
  f"Throughput: {metrics['throughput']} batches/sec"
105
  )
106
- return fig, summary
107
-
108
 
 
109
  interface = gr.Interface(
110
  fn=run_simulation,
111
  inputs=[
@@ -117,15 +27,15 @@ interface = gr.Interface(
117
  ],
118
  outputs=[
119
  gr.Plot(label="Timeline Visualization"),
120
- gr.Textbox(label="Simulation Summary", lines=8, max_lines=12, show_copy_button=True)
121
  ],
122
  title="🧠 Batch Scheduler Simulator",
123
- description="Visualize how synchronous vs asynchronous batch scheduling affects throughput, idle time, and epoch duration.",
124
  examples=[
125
  [4, 500, 200, "synchronous", 10],
126
  [8, 400, 150, "asynchronous", 15]
127
  ]
128
  )
129
 
130
- if __name__ == "__main__":
131
- interface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
def run_simulation(num_workers, batch_time, network_latency, mode, num_batches):
    """Gradio wrapper: run one scheduling simulation and format the results.

    Coerces the raw widget values to the numeric types simulate_batches
    expects, then returns the matplotlib figure plus a plain-text summary
    for the two output components.

    NOTE(review): the network_latency/mode keyword arguments below are
    hidden by the diff rendering; reconstructed from the simulate_batches
    signature — confirm against the full file.
    """
    timelines, metrics = simulate_batches(
        num_workers=int(num_workers),
        batch_time=float(batch_time),
        network_latency=float(network_latency),
        mode=mode,
        num_batches=int(num_batches),
    )
    fig = plot_timeline(timelines, metrics, num_workers)
    summary = (
        f"Mode: {mode.capitalize()}\n"
        f"Epoch Time: {metrics['epoch_time_ms']:.2f} ms\n"
        f"Idle Time: {metrics['idle_percent']} %\n"
        f"Throughput: {metrics['throughput']} batches/sec"
    )
    return fig, summary
 
 
17
 
18
+ # Define Gradio UI
19
  interface = gr.Interface(
20
  fn=run_simulation,
21
  inputs=[
 
27
  ],
28
  outputs=[
29
  gr.Plot(label="Timeline Visualization"),
30
+ gr.Textbox(label="Simulation Summary", lines=6, max_lines=8, show_copy_button=True)
31
  ],
32
  title="🧠 Batch Scheduler Simulator",
33
+ description="Visualize how synchronous vs. asynchronous batch scheduling affects throughput, idle time, and epoch duration.",
34
  examples=[
35
  [4, 500, 200, "synchronous", 10],
36
  [8, 400, 150, "asynchronous", 15]
37
  ]
38
  )
39
 
40
# Launch the Gradio app only when executed as a script, not on import —
# restores the __main__ guard this change dropped; a module-level launch()
# is an import-time side effect.
if __name__ == "__main__":
    interface.launch(share=True)