Administrator / stable-diffusion-webui / Commits / 668d7e9b

Commit 668d7e9b, authored Feb 05, 2023 by AUTOMATIC
make it possible to load SD1 checkpoints without CLIP
Parent: 3e0f9a75
Showing 2 changed files with 15 additions and 8 deletions:

    modules/sd_disable_initialization.py   +10 / -7
    modules/sd_models.py                    +5 / -1
modules/sd_disable_initialization.py (view file @ 668d7e9b)

@@ -20,8 +20,9 @@ class DisableInitialization:
     ```
     """
 
-    def __init__(self):
+    def __init__(self, disable_clip=True):
         self.replaced = []
+        self.disable_clip = disable_clip
 
     def replace(self, obj, field, func):
         original = getattr(obj, field, None)
@@ -75,12 +76,14 @@ class DisableInitialization:
         self.replace(torch.nn.init, 'kaiming_uniform_', do_nothing)
         self.replace(torch.nn.init, '_no_grad_normal_', do_nothing)
         self.replace(torch.nn.init, '_no_grad_uniform_', do_nothing)
-        self.create_model_and_transforms = self.replace(open_clip, 'create_model_and_transforms', create_model_and_transforms_without_pretrained)
-        self.CLIPTextModel_from_pretrained = self.replace(ldm.modules.encoders.modules.CLIPTextModel, 'from_pretrained', CLIPTextModel_from_pretrained)
-        self.transformers_modeling_utils_load_pretrained_model = self.replace(transformers.modeling_utils.PreTrainedModel, '_load_pretrained_model', transformers_modeling_utils_load_pretrained_model)
-        self.transformers_tokenization_utils_base_cached_file = self.replace(transformers.tokenization_utils_base, 'cached_file', transformers_tokenization_utils_base_cached_file)
-        self.transformers_configuration_utils_cached_file = self.replace(transformers.configuration_utils, 'cached_file', transformers_configuration_utils_cached_file)
-        self.transformers_utils_hub_get_from_cache = self.replace(transformers.utils.hub, 'get_from_cache', transformers_utils_hub_get_from_cache)
+
+        if self.disable_clip:
+            self.create_model_and_transforms = self.replace(open_clip, 'create_model_and_transforms', create_model_and_transforms_without_pretrained)
+            self.CLIPTextModel_from_pretrained = self.replace(ldm.modules.encoders.modules.CLIPTextModel, 'from_pretrained', CLIPTextModel_from_pretrained)
+            self.transformers_modeling_utils_load_pretrained_model = self.replace(transformers.modeling_utils.PreTrainedModel, '_load_pretrained_model', transformers_modeling_utils_load_pretrained_model)
+            self.transformers_tokenization_utils_base_cached_file = self.replace(transformers.tokenization_utils_base, 'cached_file', transformers_tokenization_utils_base_cached_file)
+            self.transformers_configuration_utils_cached_file = self.replace(transformers.configuration_utils, 'cached_file', transformers_configuration_utils_cached_file)
+            self.transformers_utils_hub_get_from_cache = self.replace(transformers.utils.hub, 'get_from_cache', transformers_utils_hub_get_from_cache)
 
     def __exit__(self, exc_type, exc_val, exc_tb):
         for obj, field, original in self.replaced:
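This change threads a disable_clip flag through the existing replace helper: the torch.nn.init patches stay unconditional, while the CLIP-related monkey-patches are applied only when the checkpoint already carries its own text encoder. Below is a minimal sketch of the patch-and-restore pattern the context manager relies on; the PatchAttributes class and noop function are illustrative names, not code from the repository.

# Sketch of the attribute patch-and-restore pattern behind DisableInitialization.
# PatchAttributes and noop are hypothetical names used for illustration only.
import torch


def noop(*args, **kwargs):
    """Stand-in that skips expensive weight initialization."""


class PatchAttributes:
    """Temporarily replace attributes on objects, restoring them on exit."""

    def __init__(self, patches):
        # patches: list of (obj, field_name, replacement) tuples
        self.patches = patches
        self.replaced = []

    def __enter__(self):
        for obj, field, func in self.patches:
            original = getattr(obj, field, None)
            if original is not None:
                self.replaced.append((obj, field, original))
                setattr(obj, field, func)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore originals even if model construction raised.
        for obj, field, original in self.replaced:
            setattr(obj, field, original)
        self.replaced.clear()


# Usage: skip torch's default init while building a module whose weights
# will be overwritten by checkpoint weights anyway.
with PatchAttributes([(torch.nn.init, 'kaiming_uniform_', noop)]):
    layer = torch.nn.Linear(8, 8)  # kaiming_uniform_ is a no-op in this block

Recording (obj, field, original) tuples rather than a dict is the same design the webui uses: it lets __exit__ restore patches in order regardless of which ones were actually applied, which matters now that the CLIP group is conditional.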
modules/sd_models.py (view file @ 668d7e9b)

@@ -354,6 +354,9 @@ def repair_config(sd_config):
         sd_config.model.params.unet_config.params.use_fp16 = True
 
 
+sd1_clip_weight = 'cond_stage_model.transformer.text_model.embeddings.token_embedding.weight'
+sd2_clip_weight = 'cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight'
+
 def load_model(checkpoint_info=None, already_loaded_state_dict=None, time_taken_to_load_state_dict=None):
     from modules import lowvram, sd_hijack
     checkpoint_info = checkpoint_info or select_checkpoint()
@@ -374,6 +377,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None, time_taken_to_load_state_dict=None):
         state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
 
     checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
+    clip_is_included_into_sd = sd1_clip_weight in state_dict or sd2_clip_weight in state_dict
 
     timer.record("find config")
@@ -386,7 +390,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None, time_taken_to_load_state_dict=None):
     sd_model = None
 
     try:
-        with sd_disable_initialization.DisableInitialization():
+        with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd):
            sd_model = instantiate_from_config(sd_config.model)
     except Exception as e:
         pass
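The two module-level keys let load_model decide from the state dict alone whether the checkpoint ships a text encoder: SD1 checkpoints store the CLIP token embedding under the first key, SD2 checkpoints store the open_clip attention projection under the second. A hedged sketch of the same check against a locally loaded checkpoint follows; loading via torch.load is a simplification of what the webui does (it routes through get_checkpoint_state_dict and also handles .safetensors files), and checkpoint_includes_clip is a hypothetical helper name.

# Sketch: mirror the sd1_clip_weight / sd2_clip_weight check from this commit.
# torch.load here is a simplification; the webui uses get_checkpoint_state_dict.
import torch

sd1_clip_weight = 'cond_stage_model.transformer.text_model.embeddings.token_embedding.weight'
sd2_clip_weight = 'cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight'


def checkpoint_includes_clip(path):
    checkpoint = torch.load(path, map_location='cpu')
    # Many .ckpt files nest the weights under a 'state_dict' key.
    state_dict = checkpoint.get('state_dict', checkpoint)
    return sd1_clip_weight in state_dict or sd2_clip_weight in state_dict

When the check is false, passing disable_clip=False leaves the CLIP loaders unpatched, so instantiate_from_config can fetch and initialize the text encoder normally instead of failing on a checkpoint that never contained those weights.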