stable-diffusion-webui

Commit 283249d2, authored Nov 04, 2022 by aria1th

apply

Parent: 179702ad

Showing 1 changed file, modules/hypernetworks/hypernetwork.py, with 49 additions and 5 deletions (+49, -5).
@@ -22,6 +22,8 @@ from collections import defaultdict, deque
 from statistics import stdev, mean
 
+optimizer_dict = {optim_name: cls_obj for optim_name, cls_obj in inspect.getmembers(torch.optim, inspect.isclass) if optim_name != "Optimizer"}
+
 class HypernetworkModule(torch.nn.Module):
     multiplier = 1.0
     activation_dict = {
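
The new module-level optimizer_dict enumerates every optimizer class that torch.optim exports, keyed by name, so that a name stored in a checkpoint can later be mapped back to a constructor. A minimal standalone sketch of the same comprehension:

import inspect

import torch

# Map each optimizer class exported by torch.optim to its name, skipping the
# abstract Optimizer base class they all inherit from.
optimizer_dict = {
    optim_name: cls_obj
    for optim_name, cls_obj in inspect.getmembers(torch.optim, inspect.isclass)
    if optim_name != "Optimizer"
}

print(sorted(optimizer_dict))  # includes 'Adam', 'AdamW', 'SGD', ... (exact list depends on the torch version)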

@@ -142,6 +144,8 @@ class Hypernetwork:
         self.use_dropout = use_dropout
         self.activate_output = activate_output
         self.last_layer_dropout = kwargs['last_layer_dropout'] if 'last_layer_dropout' in kwargs else True
+        self.optimizer_name = None
+        self.optimizer_state_dict = None
 
         for size in enable_sizes or []:
             self.layers[size] = (
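
Aside: the kwargs lookup guarded by an `in` test is behaviorally identical to dict.get with a default. A tiny sketch (pick_last_layer_dropout is a hypothetical helper, not part of the diff):

def pick_last_layer_dropout(**kwargs):
    # Same behavior as the diff's conditional expression:
    #   kwargs['last_layer_dropout'] if 'last_layer_dropout' in kwargs else True
    return kwargs.get('last_layer_dropout', True)

assert pick_last_layer_dropout() is True
assert pick_last_layer_dropout(last_layer_dropout=False) is False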

@@ -163,6 +167,7 @@ class Hypernetwork:
 
     def save(self, filename):
         state_dict = {}
+        optimizer_saved_dict = {}
 
         for k, v in self.layers.items():
             state_dict[k] = (v[0].state_dict(), v[1].state_dict())

@@ -179,7 +184,14 @@ class Hypernetwork:
         state_dict['activate_output'] = self.activate_output
         state_dict['last_layer_dropout'] = self.last_layer_dropout
 
+        if self.optimizer_name is not None:
+            optimizer_saved_dict['optimizer_name'] = self.optimizer_name
+
         torch.save(state_dict, filename)
+        if self.optimizer_state_dict:
+            optimizer_saved_dict['hash'] = sd_models.model_hash(filename)
+            optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
+            torch.save(optimizer_saved_dict, filename + '.optim')
 
     def load(self, filename):
         self.filename = filename
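
save() now writes the optimizer state into a '.optim' sidecar file next to the weights, tagged with the weight file's hash so that load() can later verify the two files belong together. A self-contained sketch of the sidecar pattern; model_hash here is a simplified stand-in for sd_models.model_hash, and save_with_optimizer is a hypothetical helper:

import hashlib

import torch

def model_hash(filename):
    # Simplified stand-in for sd_models.model_hash: fingerprint the saved
    # weights so the sidecar can be matched to the exact file it belongs to.
    with open(filename, 'rb') as f:
        return hashlib.sha256(f.read(0x10000)).hexdigest()[:8]

def save_with_optimizer(state_dict, optimizer_name, optimizer, filename):
    # Weights first, then the sidecar: the hash must reflect this save.
    torch.save(state_dict, filename)
    torch.save({
        'optimizer_name': optimizer_name,
        'hash': model_hash(filename),
        'optimizer_state_dict': optimizer.state_dict(),
    }, filename + '.optim')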

@@ -202,6 +214,18 @@ class Hypernetwork:
         print(f"Activate last layer is set to {self.activate_output}")
         self.last_layer_dropout = state_dict.get('last_layer_dropout', False)
 
+        optimizer_saved_dict = torch.load(self.filename + '.optim', map_location='cpu') if os.path.exists(self.filename + '.optim') else {}
+        self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW')
+        print(f"Optimizer name is {self.optimizer_name}")
+        if sd_models.model_hash(filename) == optimizer_saved_dict.get('hash', None):
+            self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
+        else:
+            self.optimizer_state_dict = None
+        if self.optimizer_state_dict:
+            print("Loaded existing optimizer from checkpoint")
+        else:
+            print("No saved optimizer exists in checkpoint")
+
         for size, sd in state_dict.items():
             if type(size) == int:
                 self.layers[size] = (
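
load() mirrors this: the sidecar is read if present, but the optimizer state is adopted only when its stored hash matches the current weights file; otherwise training falls back to a fresh optimizer. The gating logic in isolation (load_optimizer_state is a hypothetical helper):

import os

import torch

def load_optimizer_state(filename, current_hash):
    # An absent sidecar simply means there is nothing to resume.
    optim_path = filename + '.optim'
    saved = torch.load(optim_path, map_location='cpu') if os.path.exists(optim_path) else {}
    name = saved.get('optimizer_name', 'AdamW')
    # Resume only if the state was saved against these exact weights.
    state = saved.get('optimizer_state_dict') if saved.get('hash') == current_hash else None
    return name, state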

@@ -223,7 +247,7 @@ def list_hypernetworks(path):
         name = os.path.splitext(os.path.basename(filename))[0]
         # Prevent a hypothetical "None.pt" from being listed.
         if name != "None":
-            res[name] = filename
+            res[name + f"({sd_models.model_hash(filename)})"] = filename
     return res

@@ -369,6 +393,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
     else:
         hypernetwork_dir = None
 
+    hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
     if create_image_every > 0:
         images_dir = os.path.join(log_directory, "images")
         os.makedirs(images_dir, exist_ok=True)
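
These two hunks form a round trip: list_hypernetworks now decorates each display name with the model hash, and train_hypernetwork strips the decoration back off with rsplit before using the name in file paths. A quick check of the stripping behavior:

name = "my_hypernet"
decorated = name + "(283249d2)"  # as built by list_hypernetworks

# rsplit('(', 1) splits on the *last* '(' only, so names that themselves
# contain parentheses survive the round trip.
assert decorated.rsplit('(', 1)[0] == name
assert "a(b)c(283249d2)".rsplit('(', 1)[0] == "a(b)c"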

@@ -404,8 +429,19 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
     weights = hypernetwork.weights()
     for weight in weights:
         weight.requires_grad = True
-    # if optimizer == "AdamW": or else Adam / AdamW / SGD, etc...
-    optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate)
+    # Here we use optimizer from saved HN, or we can specify as UI option.
+    if (optimizer_name := hypernetwork.optimizer_name) in optimizer_dict:
+        optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate)
+    else:
+        print(f"Optimizer type {optimizer_name} is not defined!")
+        optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate)
+        optimizer_name = 'AdamW'
+    if hypernetwork.optimizer_state_dict:  # This line must be changed if Optimizer type can be different from saved optimizer.
+        try:
+            optimizer.load_state_dict(hypernetwork.optimizer_state_dict)
+        except RuntimeError as e:
+            print("Cannot resume from saved optimizer!")
+            print(e)
 
     steps_without_grad = 0
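
The walrus expression fetches the saved optimizer name and tests it against optimizer_dict in a single step, and load_state_dict is wrapped in try/except because a state dict saved for one optimizer class (or parameter layout) generally cannot be loaded into another. A runnable sketch of the same select-then-resume flow, with saved_name and saved_state standing in for values read from the checkpoint:

import inspect

import torch

optimizer_dict = {n: c for n, c in inspect.getmembers(torch.optim, inspect.isclass)
                  if n != "Optimizer"}

weights = [torch.nn.Parameter(torch.zeros(4))]
saved_name = 'SGD'    # e.g. hypernetwork.optimizer_name
saved_state = None    # e.g. hypernetwork.optimizer_state_dict

if (optimizer_name := saved_name) in optimizer_dict:
    optimizer = optimizer_dict[optimizer_name](params=weights, lr=1e-3)
else:
    print(f"Optimizer type {optimizer_name} is not defined!")
    optimizer = torch.optim.AdamW(params=weights, lr=1e-3)
    optimizer_name = 'AdamW'

if saved_state:
    try:
        optimizer.load_state_dict(saved_state)
    except RuntimeError as e:
        # Mismatched optimizer class or parameter shapes: start fresh instead.
        print("Cannot resume from saved optimizer!", e)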

@@ -467,7 +503,11 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
                 # Before saving, change name to match current checkpoint.
                 hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}'
                 last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt')
+                hypernetwork.optimizer_name = optimizer_name
+                if shared.opts.save_optimizer_state:
+                    hypernetwork.optimizer_state_dict = optimizer.state_dict()
                 save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file)
+                hypernetwork.optimizer_state_dict = None  # dereference it after saving, to save memory.
 
         textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), {
             "loss": f"{previous_mean_loss:.7f}",
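
The attach-save-dereference pattern around each periodic save exists because an Adam-family state dict references two extra float tensors per trainable parameter; dropping the reference right after saving, as the diff's own comment notes, keeps those buffers from being held alive longer than needed. A standalone check of what the state dict actually carries:

import torch

w = torch.nn.Parameter(torch.zeros(10))
opt = torch.optim.AdamW([w], lr=1e-3)
w.grad = torch.ones_like(w)
opt.step()

sd = opt.state_dict()
# AdamW keeps exp_avg and exp_avg_sq per parameter -- roughly two extra
# copies of the weights, hence the dereference after each save.
print(sd['state'][0].keys())  # dict_keys(['step', 'exp_avg', 'exp_avg_sq'])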

@@ -530,8 +570,12 @@ Last saved image: {html.escape(last_saved_image)}<br/>
     report_statistics(loss_dict)
 
     filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
+    hypernetwork.optimizer_name = optimizer_name
+    if shared.opts.save_optimizer_state:
+        hypernetwork.optimizer_state_dict = optimizer.state_dict()
     save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename)
-
+    del optimizer
+    hypernetwork.optimizer_state_dict = None  # dereference it after saving, to save memory.
 
     return hypernetwork, filename
 
 def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):