Commit fabaf4bd authored Sep 18, 2022 by EyeDeck

Add some error handling for VRAM monitor

parent 7e779382
Showing 2 changed files with 31 additions and 19 deletions:

  modules/memmon.py  +15 -7
  modules/ui.py      +16 -12
modules/memmon.py

@@ -22,6 +22,13 @@ class MemUsageMonitor(threading.Thread):
         self.run_flag = threading.Event()
         self.data = defaultdict(int)
 
+        try:
+            torch.cuda.mem_get_info()
+            torch.cuda.memory_stats(self.device)
+        except Exception as e:  # AMD or whatever
+            print(f"Warning: caught exception '{e}', memory monitor disabled")
+            self.disabled = True
+
     def run(self):
         if self.disabled:
             return

@@ -62,13 +69,14 @@ class MemUsageMonitor(threading.Thread):
         self.run_flag.set()
 
     def read(self):
-        free, total = torch.cuda.mem_get_info()
-        self.data["total"] = total
-
-        torch_stats = torch.cuda.memory_stats(self.device)
-        self.data["active_peak"] = torch_stats["active_bytes.all.peak"]
-        self.data["reserved_peak"] = torch_stats["reserved_bytes.all.peak"]
-        self.data["system_peak"] = total - self.data["min_free"]
+        if not self.disabled:
+            free, total = torch.cuda.mem_get_info()
+            self.data["total"] = total
+
+            torch_stats = torch.cuda.memory_stats(self.device)
+            self.data["active_peak"] = torch_stats["active_bytes.all.peak"]
+            self.data["reserved_peak"] = torch_stats["reserved_bytes.all.peak"]
+            self.data["system_peak"] = total - self.data["min_free"]
 
         return self.data
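The memmon.py change boils down to a probe-and-disable pattern: exercise the CUDA queries once at construction time, and if they raise (CPU-only or AMD builds where torch.cuda cannot answer them), flip a disabled flag that every later entry point checks instead of retrying the calls. A minimal, self-contained sketch of that pattern follows; the VRAMProbe name and the read() payload are illustrative, not the repository's API.

import torch


class VRAMProbe:
    def __init__(self, device="cuda"):
        self.device = device
        self.disabled = False
        try:
            # Probe once up front: both calls raise on backends without
            # working CUDA support, so a failure here means every later
            # poll would fail the same way.
            torch.cuda.mem_get_info()
            torch.cuda.memory_stats(self.device)
        except Exception as e:
            print(f"Warning: caught exception '{e}', memory monitor disabled")
            self.disabled = True

    def read(self):
        # Later entry points re-check the flag rather than retrying the
        # CUDA calls, so a single failed probe silences the monitor for good.
        if self.disabled:
            return {}
        free, total = torch.cuda.mem_get_info()
        return {"free": free, "total": total}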
modules/ui.py

@@ -119,7 +119,8 @@ def save_files(js_data, images, index):
 
 def wrap_gradio_call(func):
     def f(*args, **kwargs):
-        shared.mem_mon.monitor()
+        if opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled:
+            shared.mem_mon.monitor()
         t = time.perf_counter()
 
         try:

@@ -136,17 +137,20 @@ def wrap_gradio_call(func):
         elapsed = time.perf_counter() - t
 
-        mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
-        active_peak = mem_stats['active_peak']
-        reserved_peak = mem_stats['reserved_peak']
-        sys_peak = '?' if opts.memmon_poll_rate <= 0 else mem_stats['system_peak']
-        sys_total = mem_stats['total']
-        sys_pct = '?' if opts.memmon_poll_rate <= 0 else round(sys_peak/sys_total * 100, 2)
-        vram_tooltip = "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.&#013;" \
-                       "Torch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.&#013;" \
-                       "Sys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%)."
-
-        vram_html = '' if opts.memmon_poll_rate == 0 else f"<p class='vram' title='{vram_tooltip}'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
+        if opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled:
+            mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
+            active_peak = mem_stats['active_peak']
+            reserved_peak = mem_stats['reserved_peak']
+            sys_peak = mem_stats['system_peak']
+            sys_total = mem_stats['total']
+            sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
+            vram_tooltip = "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.&#013;" \
+                           "Torch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.&#013;" \
+                           "Sys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%)."
+
+            vram_html = f"<p class='vram' title='{vram_tooltip}'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
+        else:
+            vram_html = ''
 
         # last item is always HTML
         res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed:.2f}s</p>{vram_html}</div>"
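Two details in the ui.py hunk are easy to skim past. The MiB conversion -(v//-(1024*1024)) is integer ceiling division, so any partial mebibyte rounds up rather than being silently dropped, and the percentage now divides by max(sys_total, 1) so a zero total from a broken driver cannot raise ZeroDivisionError. A quick, self-contained sketch of the ceiling-division idiom (the helper name is mine, not the repository's):

def bytes_to_mib_ceil(v: int) -> int:
    # -(v // -d) is ceiling division in pure integer arithmetic:
    # floor-dividing the negated value and negating again rounds
    # toward +infinity instead of down.
    return -(v // -(1024 * 1024))

assert bytes_to_mib_ceil(1) == 1                 # a partial MiB rounds up
assert bytes_to_mib_ceil(1024 * 1024) == 1       # exact multiples unchanged
assert bytes_to_mib_ceil(1024 * 1024 + 1) == 2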