Commit 7e8823d
Parent: 839bad5

change gpu memory display format

Files changed: gpu_info.py (+8 -8), helper.py (+1 -1)
gpu_info.py CHANGED

@@ -1,19 +1,19 @@
 from subprocess import check_output
 from threading import Timer
-from typing import Callable, List
+from typing import Callable, List, Tuple


 def get_gpu_memory() -> List[int]:
     """
-    Get the …
+    Get the used and total GPU memory (VRAM) in MiB

-    :return …
+    :return memory_values: List of used and total GPU memory (VRAM) in MiB
     """

-    command = "nvidia-smi --query-gpu=memory.…
-    …
-    …
-    return …
+    command = "nvidia-smi --query-gpu=memory.used,memory.total --format=csv,noheader,nounits"
+    memory_info = check_output(command.split()).decode("ascii").replace("\r", "").split("\n")[:-1]
+    memory_values = list(map(lambda x: tuple(map(int, x.split(","))), memory_info))
+    return memory_values


 class RepeatingTimer(Timer):
@@ -27,7 +27,7 @@ class RepeatingTimer(Timer):
 gpu_memory_watcher: RepeatingTimer = None


-def watch_gpu_memory(interval: int = 1, callback: Callable[[List[int]], None] = None) -> RepeatingTimer:
+def watch_gpu_memory(interval: int = 1, callback: Callable[[List[Tuple[int, int]]], None] = None) -> RepeatingTimer:
     """
     Start a repeating timer to watch the GPU memory usage

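For context, the new parsing can be sketched against a hardcoded sample of nvidia-smi output. This is a minimal sketch: the two-GPU values below are invented, and `raw` stands in for the decoded `check_output` result.

```python
# Hypothetical stdout of:
#   nvidia-smi --query-gpu=memory.used,memory.total --format=csv,noheader,nounits
# One "used, total" line per GPU; the numbers are invented for illustration.
raw = "3541, 24576\n0, 24576\n"

# Mirror the parsing in get_gpu_memory(): strip carriage returns, split on
# newlines, and drop the empty string left behind by the trailing newline.
memory_info = raw.replace("\r", "").split("\n")[:-1]

# Turn each CSV line into a (used, total) tuple of ints.
memory_values = list(map(lambda x: tuple(map(int, x.split(","))), memory_info))

print(memory_values)  # [(3541, 24576), (0, 24576)]
```

Since the function now yields (used, total) tuples instead of plain ints, the callback annotation on `watch_gpu_memory` changes accordingly to `Callable[[List[Tuple[int, int]]], None]`.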
helper.py CHANGED

@@ -417,7 +417,7 @@ def generate_video(
     )

     # watch gpu memory
-    watcher = watch_gpu_memory(10, lambda x: log.debug(f"GPU memory …
+    watcher = watch_gpu_memory(10, lambda x: log.debug(f"GPU memory (used, total): {x} (MiB)"))

     # start inference
     if chunking <= 0:
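To show the resulting display format, here is what the debug callback would emit for a single GPU; the memory values are hypothetical, and `print` stands in for `log.debug`:

```python
# One GPU at 3541 MiB used out of 24576 MiB total (hypothetical values)
x = [(3541, 24576)]
print(f"GPU memory (used, total): {x} (MiB)")
# -> GPU memory (used, total): [(3541, 24576)] (MiB)
```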