stable-diffusion-webui

Unverified commit 685f9631, authored Dec 10, 2022 by AUTOMATIC1111; committed by GitHub on Dec 10, 2022
Merge pull request #5586 from wywywywy/ldsr-improvements
LDSR improvements - cache / optimization / opt_channelslast
Parents: 0a81dd52, 1581d5a1
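
In short, the merged branch adds an in-memory cache for the loaded LDSR model, routes the model through the web UI's sd_hijack optimization pass, and applies channels-last memory format when shared.cmd_opts.opt_channelslast is set. As a standalone illustration (not taken from the commit), the snippet below only shows what converting a tensor to torch.channels_last does; the shapes are arbitrary.

import torch

# Purely illustrative: a 4D NCHW tensor in the default (channels-first) layout.
x = torch.randn(2, 3, 8, 8)
print(x.is_contiguous())                                   # True

# channels_last keeps the logical NCHW shape but stores the data as NHWC,
# which can speed up convolutions on some backends.
y = x.to(memory_format=torch.channels_last)
print(y.shape)                                             # torch.Size([2, 3, 8, 8])
print(y.is_contiguous(memory_format=torch.channels_last))  # True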
Showing 2 changed files, with 35 additions and 15 deletions:

extensions-builtin/LDSR/ldsr_model_arch.py     +34  -15
extensions-builtin/LDSR/scripts/ldsr_model.py   +1   -0

extensions-builtin/LDSR/ldsr_model_arch.py

@@ -11,25 +11,41 @@ from omegaconf import OmegaConf
 from ldm.models.diffusion.ddim import DDIMSampler
 from ldm.util import instantiate_from_config, ismap
+from modules import shared, sd_hijack
 
 warnings.filterwarnings("ignore", category=UserWarning)
 
+cached_ldsr_model: torch.nn.Module = None
+
 
 # Create LDSR Class
 class LDSR:
     def load_model_from_config(self, half_attention):
-        print(f"Loading model from {self.modelPath}")
-        pl_sd = torch.load(self.modelPath, map_location="cpu")
-        sd = pl_sd["state_dict"]
-        config = OmegaConf.load(self.yamlPath)
-        config.model.target = "ldm.models.diffusion.ddpm.LatentDiffusionV1"
-        model = instantiate_from_config(config.model)
-        model.load_state_dict(sd, strict=False)
-        model.cuda()
-        if half_attention:
-            model = model.half()
+        global cached_ldsr_model
+
+        if shared.opts.ldsr_cached and cached_ldsr_model is not None:
+            print(f"Loading model from cache")
+            model: torch.nn.Module = cached_ldsr_model
+        else:
+            print(f"Loading model from {self.modelPath}")
+            pl_sd = torch.load(self.modelPath, map_location="cpu")
+            sd = pl_sd["state_dict"]
+            config = OmegaConf.load(self.yamlPath)
+            config.model.target = "ldm.models.diffusion.ddpm.LatentDiffusionV1"
+            model: torch.nn.Module = instantiate_from_config(config.model)
+            model.load_state_dict(sd, strict=False)
+            model = model.to(shared.device)
+            if half_attention:
+                model = model.half()
+            if shared.cmd_opts.opt_channelslast:
+                model = model.to(memory_format=torch.channels_last)
+
+            sd_hijack.model_hijack.hijack(model) # apply optimization
+            model.eval()
+
+            if shared.opts.ldsr_cached:
+                cached_ldsr_model = model
 
-        model.eval()
         return {"model": model}
 
     def __init__(self, model_path, yaml_path):

@@ -94,7 +110,8 @@ class LDSR:
         down_sample_method = 'Lanczos'
 
         gc.collect()
-        torch.cuda.empty_cache()
+        if torch.cuda.is_available:
+            torch.cuda.empty_cache()
 
         im_og = image
         width_og, height_og = im_og.size

@@ -131,7 +148,9 @@ class LDSR:
         del model
         gc.collect()
-        torch.cuda.empty_cache()
+        if torch.cuda.is_available:
+            torch.cuda.empty_cache()
+
         return a

@@ -146,7 +165,7 @@ def get_cond(selected_path):
     c = rearrange(c, '1 c h w -> 1 h w c')
     c = 2. * c - 1.
 
-    c = c.to(torch.device("cuda"))
+    c = c.to(shared.device)
     example["LR_image"] = c
     example["image"] = c_up
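
The first hunk above is essentially the standard module-level cache pattern: load the model once, keep a reference in a global, and hand the cached module back while the user option stays enabled. Below is a minimal, self-contained sketch of that pattern; get_model, load_checkpoint, and the checkpoint path are illustrative stand-ins, not names from the commit.

import torch

_cached_model: torch.nn.Module = None  # module-level cache, mirrors cached_ldsr_model


def load_checkpoint(path: str) -> torch.nn.Module:
    # Stand-in loader so the sketch runs; the real code builds the LDSR model
    # from an OmegaConf config and the checkpoint's state_dict.
    return torch.nn.Identity()


def get_model(path: str, use_cache: bool) -> torch.nn.Module:
    """Return the model, reloading from disk only when the cache is empty or disabled."""
    global _cached_model

    if use_cache and _cached_model is not None:
        return _cached_model  # reuse the previously loaded weights

    model = load_checkpoint(path)
    model.eval()

    if use_cache:
        _cached_model = model  # keep it resident for the next call
    return model


if __name__ == "__main__":
    m1 = get_model("ldsr.ckpt", use_cache=True)  # path is illustrative only
    m2 = get_model("ldsr.ckpt", use_cache=True)
    print(m1 is m2)  # True: the second call reused the cached module

One detail worth noting in the later hunks: the guard is written as `if torch.cuda.is_available:` rather than `if torch.cuda.is_available():`, and a bare function object is always truthy, so the `torch.cuda.empty_cache()` call still runs on every pass rather than being skipped on CPU-only systems.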

extensions-builtin/LDSR/scripts/ldsr_model.py

@@ -59,6 +59,7 @@ def on_ui_settings():
     import gradio as gr
 
     shared.opts.add_option("ldsr_steps", shared.OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}, section=('upscaling', "Upscaling")))
+    shared.opts.add_option("ldsr_cached", shared.OptionInfo(False, "Cache LDSR model in memory", gr.Checkbox, {"interactive": True}, section=('upscaling', "Upscaling")))
 
 
 script_callbacks.on_ui_settings(on_ui_settings)
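
For orientation, the checkbox registered here is the setting that ldsr_model_arch.py reads back as shared.opts.ldsr_cached before deciding whether to reuse the cached model. The toy registry below is a hypothetical stand-in meant only to show the register-then-read-as-attribute flow; the real Options and OptionInfo classes live in the web UI's modules/shared.py and are more involved.

# Hypothetical miniature of the options registry; not the web UI's real code.
class OptionInfo:
    def __init__(self, default, label, component=None, component_args=None, section=None):
        self.default = default
        self.label = label


class Options:
    def __init__(self):
        self._values = {}

    def add_option(self, key, info: OptionInfo):
        # Registering an option exposes it in the settings UI and sets its default.
        self._values[key] = info.default

    def __getattr__(self, key):
        # Attribute access is how callers read a setting, e.g. opts.ldsr_cached.
        try:
            return self._values[key]
        except KeyError:
            raise AttributeError(key)


opts = Options()
opts.add_option("ldsr_cached", OptionInfo(False, "Cache LDSR model in memory"))
print(opts.ldsr_cached)  # False until the user enables the checkbox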