Administrator / stable-diffusion-webui / Commits
Commit 17f9e556 (unverified)
Authored Nov 04, 2022 by AUTOMATIC1111; committed by GitHub on Nov 04, 2022
Merge pull request #4036 from R-N/fix-ckpt-cache
Fix 1 checkpoint cache count being useless #4035
Parents: 352b3310, 24fc05cf
Showing 1 changed file with 9 additions and 12 deletions:
modules/sd_models.py (+9, -12)
modules/sd_models.py @ 17f9e556

@@ -163,11 +163,11 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
     checkpoint_file = checkpoint_info.filename
     sd_model_hash = checkpoint_info.hash

     vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
-    checkpoint_key = checkpoint_info
+    if shared.opts.sd_checkpoint_cache > 0 and hasattr(model, "sd_checkpoint_info"):
+        sd_vae.restore_base_vae(model)
+        checkpoints_loaded[model.sd_checkpoint_info] = model.state_dict().copy()

-    if checkpoint_key not in checkpoints_loaded:
+    if checkpoint_info not in checkpoints_loaded:
         print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")

         pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
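The first hunk above inverts the caching direction. The old code keyed the cache on a `checkpoint_key` alias and only cached a checkpoint after loading it; the new code snapshots the outgoing model into `checkpoints_loaded` (keyed by its own `sd_checkpoint_info`) before the next checkpoint is read, calling `sd_vae.restore_base_vae(model)` first so the cached weights carry the checkpoint's original VAE rather than one swapped in later. A minimal sketch of the pattern, with a no-op stand-in for the VAE restore (not the webui implementation):

```python
from collections import OrderedDict

checkpoints_loaded = OrderedDict()  # cache structure used by the diff

def restore_base_vae(model):
    """Stand-in for sd_vae.restore_base_vae: put the checkpoint's own
    VAE weights back so the snapshot is not polluted by an external VAE."""

def cache_outgoing_model(model, cache_size):
    # The hasattr guard skips a freshly constructed model that has never
    # been through load_model_weights and so has nothing worth caching.
    if cache_size > 0 and hasattr(model, "sd_checkpoint_info"):
        restore_base_vae(model)
        # state_dict() references the live tensors; .copy() takes a shallow
        # copy of the mapping, mirroring the line in the diff
        checkpoints_loaded[model.sd_checkpoint_info] = model.state_dict().copy()
```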
@@ -197,18 +197,15 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
         model.first_stage_model.to(devices.dtype_vae)

-        if shared.opts.sd_checkpoint_cache > 0:
-            # if PR #4035 were to get merged, restore base VAE first before caching
-            checkpoints_loaded[checkpoint_key] = model.state_dict().copy()
-            while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
-                checkpoints_loaded.popitem(last=False)  # LRU
     else:
         vae_name = sd_vae.get_filename(vae_file) if vae_file else None
         vae_message = f" with {vae_name} VAE" if vae_name else ""
         print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
-        checkpoints_loaded.move_to_end(checkpoint_key)
-        model.load_state_dict(checkpoints_loaded[checkpoint_key])
+        model.load_state_dict(checkpoints_loaded[checkpoint_info])

+    if shared.opts.sd_checkpoint_cache > 0:
+        while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
+            checkpoints_loaded.popitem(last=False)  # LRU

     model.sd_model_hash = sd_model_hash
     model.sd_model_checkpoint = checkpoint_file
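The second hunk completes the move: the old post-load caching block and the `move_to_end`/`checkpoint_key` bookkeeping are dropped, a cache hit now loads `checkpoints_loaded[checkpoint_info]` directly, and the LRU trim runs after both branches, so the bound is enforced whether the weights came from disk or from cache. This is why a cache size of 1 stops being useless (issue #4035): previously the single slot always held the checkpoint that was already loaded, so every switch missed; now it holds the checkpoint being switched away from, so flipping between two models hits from the third switch onward. A toy `OrderedDict` model of the two strategies (these helper names are illustrative, not webui code):

```python
from collections import OrderedDict

CACHE_SIZE = 1  # sd_checkpoint_cache = 1

def switch_cache_after(cache, current, target):
    """Old behaviour: cache the incoming checkpoint after loading it."""
    hit = target in cache
    cache[target] = f"<state_dict of {target}>"
    while len(cache) > CACHE_SIZE:
        cache.popitem(last=False)  # evict the oldest entry (LRU)
    return hit

def switch_cache_before(cache, current, target):
    """New behaviour: snapshot the outgoing model, then look up the target."""
    if current is not None:
        cache[current] = f"<state_dict of {current}>"
    hit = target in cache
    while len(cache) > CACHE_SIZE:
        cache.popitem(last=False)
    return hit

old, new = OrderedDict(), OrderedDict()
current = None
for target in ["A", "B", "A", "B"]:
    print(target, switch_cache_after(old, current, target),
          switch_cache_before(new, current, target))
    current = target
# prints: A False False, B False False, A False True, B False True --
# the old strategy never hits with a one-slot cache; the new one hits
# on every switch once two checkpoints have been seen.
```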