stable-diffusion-webui

Commit 9bb20be0
authored Sep 12, 2022 by AUTOMATIC
memory optimization for CLIP interrogator
changed default cfg_scale to a higher value
parent ab0a79cd
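The memory optimization below follows one pattern throughout: a model that is not actively in use is moved from VRAM to system RAM, and freed CUDA memory is then garbage-collected. A minimal sketch of that pattern in plain PyTorch (the helper names here are illustrative, not from this repository):

import gc
import torch

def send_to_ram(model: torch.nn.Module) -> torch.nn.Module:
    # Moving the parameters to the CPU frees their VRAM allocations...
    model = model.to(torch.device("cpu"))
    # ...but PyTorch's caching allocator keeps the freed blocks, so collect
    # Python garbage and empty the CUDA cache to make the memory available
    # to other models.
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    return model

def send_to_gpu(model: torch.nn.Module) -> torch.nn.Module:
    # Move the model back to the GPU just before it is needed.
    return model.to(torch.device("cuda"))

This is the shape of the send_clip_to_ram/send_blip_to_ram helpers introduced in modules/interrogate.py below.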
Showing 4 changed files with 36 additions and 7 deletions:
modules/interrogate.py   +23 -5
modules/lowvram.py       +10 -0
modules/shared.py        +1  -0
modules/ui.py            +2  -2
modules/interrogate.py
@@ -11,7 +11,7 @@ from torchvision import transforms
 from torchvision.transforms.functional import InterpolationMode

 import modules.shared as shared
-from modules import devices, paths
+from modules import devices, paths, lowvram

 blip_image_eval_size = 384
 blip_model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth'
@@ -75,19 +75,28 @@ class InterrogateModels:
         self.dtype = next(self.clip_model.parameters()).dtype

-    def unload(self):
+    def send_clip_to_ram(self):
         if not shared.opts.interrogate_keep_models_in_memory:
             if self.clip_model is not None:
                 self.clip_model = self.clip_model.to(devices.cpu)

+    def send_blip_to_ram(self):
+        if not shared.opts.interrogate_keep_models_in_memory:
             if self.blip_model is not None:
                 self.blip_model = self.blip_model.to(devices.cpu)

-            devices.torch_gc()
+    def unload(self):
+        self.send_clip_to_ram()
+        self.send_blip_to_ram()
+
+        devices.torch_gc()

     def rank(self, image_features, text_array, top_count=1):
         import clip

+        if shared.opts.interrogate_clip_dict_limit != 0:
+            text_array = text_array[0:int(shared.opts.interrogate_clip_dict_limit)]
+
         top_count = min(top_count, len(text_array))
         text_tokens = clip.tokenize([text for text in text_array]).to(shared.device)
         text_features = self.clip_model.encode_text(text_tokens).type(self.dtype)
@@ -117,16 +126,24 @@ class InterrogateModels:
         res = None

         try:
+            if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+                lowvram.send_everything_to_cpu()
+                devices.torch_gc()
+
             self.load()
+
             caption = self.generate_caption(pil_image)
+            self.send_blip_to_ram()
+            devices.torch_gc()
+
             res = caption

-            images = self.clip_preprocess(pil_image).unsqueeze(0).type(self.dtype).to(shared.device)
+            cilp_image = self.clip_preprocess(pil_image).unsqueeze(0).type(self.dtype).to(shared.device)

             precision_scope = torch.autocast if shared.cmd_opts.precision == "autocast" else contextlib.nullcontext
             with torch.no_grad(), precision_scope("cuda"):
-                image_features = self.clip_model.encode_image(images).type(self.dtype)
+                image_features = self.clip_model.encode_image(cilp_image).type(self.dtype)

                 image_features /= image_features.norm(dim=-1, keepdim=True)
@@ -146,4 +163,5 @@ class InterrogateModels:
         self.unload()

         res += "<error>"

         return res
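Both the refactored unload() and the new interrogate flow rely on devices.torch_gc() to hand freed VRAM back to the allocator. Its body is not part of this diff; a hedged sketch of what such a helper typically does:

import gc
import torch

def torch_gc():
    # Drop unreachable Python objects first so their tensors can be freed.
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()   # return cached CUDA blocks to the driver
        torch.cuda.ipc_collect()   # reclaim memory held by dead IPC handles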
modules/lowvram.py
@@ -5,6 +5,16 @@ module_in_gpu = None
 cpu = torch.device("cpu")
 device = gpu = get_optimal_device()

+
+def send_everything_to_cpu():
+    global module_in_gpu
+
+    if module_in_gpu is not None:
+        module_in_gpu.to(cpu)
+
+    module_in_gpu = None
+
+
 def setup_for_low_vram(sd_model, use_medvram):
     parents = {}
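send_everything_to_cpu() empties the one-module-at-a-time slot that low-VRAM mode maintains: interrogate() calls it (via the lowvram import added above) so the whole GPU is free before BLIP and CLIP load. A simplified sketch of that single-slot scheme, assuming setup_for_low_vram moves submodules on demand (only the module_in_gpu/cpu/gpu names come from the diff; the rest is illustrative):

import torch

module_in_gpu = None
cpu = torch.device("cpu")
gpu = torch.device("cuda")

def swap_in(module: torch.nn.Module):
    # At most one large module occupies VRAM at a time: evict the current
    # occupant to system RAM before moving the requested module in.
    global module_in_gpu
    if module_in_gpu is module:
        return
    if module_in_gpu is not None:
        module_in_gpu.to(cpu)
    module.to(gpu)
    module_in_gpu = module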
modules/shared.py
@@ -132,6 +132,7 @@ class Options:
     "interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
     "interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum descripton length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
     "interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum descripton length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
+    "interrogate_clip_dict_limit": OptionInfo(1500, "Interrogate: maximum number of lines in text file (0 = No limit)"),
 }

 def __init__(self):
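The new interrogate_clip_dict_limit option feeds the slice added to rank() in modules/interrogate.py: capping the dictionary bounds how many prompts clip.tokenize() and encode_text() process in a single batch, which bounds peak VRAM. An illustrative run with made-up data:

# Hypothetical artist dictionary; only the slicing logic mirrors the diff.
text_array = [f"by artist {i}" for i in range(5000)]

limit = 1500                        # the option's default
if limit != 0:
    text_array = text_array[0:int(limit)]

print(len(text_array))              # 1500 prompts encoded instead of 5000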
modules/ui.py
@@ -270,7 +270,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
         batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
         batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)

-        cfg_scale = gr.Slider(minimum=1.0, maximum=15.0, step=0.5, label='CFG Scale', value=7.0)
+        cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)

         with gr.Group():
             height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)

@@ -413,7 +413,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
             batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)

         with gr.Group():
-            cfg_scale = gr.Slider(minimum=1.0, maximum=15.0, step=0.5, label='CFG Scale', value=7.0)
+            cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
             denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
             denoising_strength_change_factor = gr.Slider(minimum=0.9, maximum=1.1, step=0.01, label='Denoising strength change factor', value=1, visible=False)