stable-diffusion-webui / Commits

Unverified commit 9e892d90, authored Sep 18, 2022 by AUTOMATIC1111, committed by GitHub on Sep 18, 2022.
Merge pull request #651 from EyeDeck/master

Add some error handling for VRAM monitor

Parents: 83a65919, 46db1405
Showing 2 changed files with 32 additions and 19 deletions:

  modules/memmon.py  +15 -7
  modules/ui.py      +17 -12
modules/memmon.py

@@ -22,6 +22,13 @@ class MemUsageMonitor(threading.Thread):
         self.run_flag = threading.Event()
         self.data = defaultdict(int)
 
+        try:
+            torch.cuda.mem_get_info()
+            torch.cuda.memory_stats(self.device)
+        except Exception as e:  # AMD or whatever
+            print(f"Warning: caught exception '{e}', memory monitor disabled")
+            self.disabled = True
+
     def run(self):
+        if self.disabled:
+            return
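The try/except added above is a one-time capability probe: both torch.cuda.mem_get_info() and torch.cuda.memory_stats() can raise on builds without full CUDA support (the inline comment points at AMD as the motivating case), so the monitor flags itself disabled at construction instead of letting its polling thread crash later. A minimal standalone sketch of the same pattern, with illustrative names (SafeVramProbe is not from the project):

import threading
from collections import defaultdict

import torch


class SafeVramProbe(threading.Thread):
    """Illustrative only: self-disable when the CUDA memory API is unusable."""

    disabled = False

    def __init__(self, device="cuda"):
        threading.Thread.__init__(self)
        self.daemon = True
        self.device = device
        self.data = defaultdict(int)

        try:
            # Probe the two calls the monitor depends on; either may raise
            # on backends that do not implement them.
            torch.cuda.mem_get_info()
            torch.cuda.memory_stats(self.device)
        except Exception as e:
            print(f"Warning: caught exception '{e}', memory monitor disabled")
            self.disabled = True

    def run(self):
        if self.disabled:
            return  # never enter the polling loop on unsupported backends
        # ... polling loop would go here ...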
@@ -62,13 +69,14 @@ class MemUsageMonitor(threading.Thread):
         self.run_flag.set()
 
     def read(self):
-        free, total = torch.cuda.mem_get_info()
-        self.data["total"] = total
-
-        torch_stats = torch.cuda.memory_stats(self.device)
-        self.data["active_peak"] = torch_stats["active_bytes.all.peak"]
-        self.data["reserved_peak"] = torch_stats["reserved_bytes.all.peak"]
-        self.data["system_peak"] = total - self.data["min_free"]
+        if not self.disabled:
+            free, total = torch.cuda.mem_get_info()
+            self.data["total"] = total
+
+            torch_stats = torch.cuda.memory_stats(self.device)
+            self.data["active_peak"] = torch_stats["active_bytes.all.peak"]
+            self.data["reserved_peak"] = torch_stats["reserved_bytes.all.peak"]
+            self.data["system_peak"] = total - self.data["min_free"]
 
         return self.data
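Note why the disabled path can fall straight through to return self.data: the container is a defaultdict(int), so a caller that indexes keys a disabled monitor never wrote gets 0 back instead of a KeyError. A quick demonstration of that property:

from collections import defaultdict

data = defaultdict(int)  # same container type the monitor uses

# A disabled monitor returns this dict untouched from read();
# reads of never-written keys yield 0 rather than raising KeyError.
print(data["active_peak"])  # 0
print(data["total"])        # 0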
modules/ui.py

@@ -119,7 +119,9 @@ def save_files(js_data, images, index):
 def wrap_gradio_call(func):
     def f(*args, **kwargs):
-        shared.mem_mon.monitor()
+        run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
+        if run_memmon:
+            shared.mem_mon.monitor()
         t = time.perf_counter()
 
         try:
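run_memmon is computed once at the top of the wrapper and reused after func returns, so the start and stop sides of the monitor can never disagree mid-call. A self-contained sketch of the same gate, where _StubMonitor, memmon_poll_rate, and wrapped_call are hypothetical stand-ins for the project's shared.mem_mon, opts, and wrap_gradio_call:

import time


class _StubMonitor:
    """Hypothetical stand-in for shared.mem_mon, for illustration only."""
    disabled = False

    def monitor(self):
        print("monitor started")

    def stop(self):
        return {"active_peak": 0, "reserved_peak": 0, "system_peak": 0, "total": 1}


memmon_poll_rate = 8      # stands in for opts.memmon_poll_rate
mem_mon = _StubMonitor()  # stands in for shared.mem_mon


def wrapped_call(func, *args, **kwargs):
    # Decide once, reuse on both sides of the call, so start/stop always pair up.
    run_memmon = memmon_poll_rate > 0 and not mem_mon.disabled
    if run_memmon:
        mem_mon.monitor()
    t = time.perf_counter()
    result = func(*args, **kwargs)
    elapsed = time.perf_counter() - t
    stats = mem_mon.stop() if run_memmon else {}
    return result, elapsed, stats


print(wrapped_call(sum, [1, 2, 3]))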
@@ -136,17 +138,20 @@ def wrap_gradio_call(func):
         elapsed = time.perf_counter() - t
 
-        mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
-        active_peak = mem_stats['active_peak']
-        reserved_peak = mem_stats['reserved_peak']
-        sys_peak = '?' if opts.memmon_poll_rate <= 0 else mem_stats['system_peak']
-        sys_total = mem_stats['total']
-        sys_pct = '?' if opts.memmon_poll_rate <= 0 else round(sys_peak/sys_total * 100, 2)
-        vram_tooltip = "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.&#013;" \
-                       "Torch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.&#013;" \
-                       "Sys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%)."
-
-        vram_html = '' if opts.memmon_poll_rate == 0 else f"<p class='vram' title='{vram_tooltip}'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
+        if run_memmon:
+            mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
+            active_peak = mem_stats['active_peak']
+            reserved_peak = mem_stats['reserved_peak']
+            sys_peak = mem_stats['system_peak']
+            sys_total = mem_stats['total']
+            sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
+            vram_tooltip = "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.&#013;" \
+                           "Torch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.&#013;" \
+                           "Sys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%)."
+
+            vram_html = f"<p class='vram' title='{vram_tooltip}'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
+        else:
+            vram_html = ''
 
         # last item is always HTML
         res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed:.2f}s</p>{vram_html}</div>"
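Two small details in the new block are easy to miss. The expression -(v//-(1024*1024)) is integer ceiling division: it converts bytes to MiB rounding up, so a nonzero peak never displays as 0 MiB. And max(sys_total, 1) guards the percentage against a zero total, which would otherwise raise ZeroDivisionError. A sketch of the rounding trick (bytes_to_mib_ceil is an illustrative name, not the project's):

def bytes_to_mib_ceil(v):
    # Integer-only equivalent of math.ceil(v / (1024 * 1024)):
    # floor-dividing the negated divisor, then negating, rounds up.
    return -(v // -(1024 * 1024))


print(bytes_to_mib_ceil(1))                # 1 (rounds up, not down to 0)
print(bytes_to_mib_ceil(1024 * 1024))      # 1 (an exact MiB is unchanged)
print(bytes_to_mib_ceil(1024 * 1024 + 1))  # 2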