stable-diffusion-webui / Commits / 6a02841f

Unverified commit 6a02841f, authored Oct 22, 2022 by discus0434, committed by GitHub on Oct 22, 2022.

Merge pull request #2 from aria1th/patch-6

generalized some functions and option for ignoring first layer

Parents: f8733ad0, f89829ec
Showing 1 changed file with 15 additions and 8 deletions.

modules/hypernetworks/hypernetwork.py (+15 −8)
```diff
@@ -21,21 +21,27 @@ from modules.textual_inversion.learn_schedule import LearnRateScheduler
 class HypernetworkModule(torch.nn.Module):
     multiplier = 1.0
+    activation_dict = {"relu": torch.nn.ReLU, "leakyrelu": torch.nn.LeakyReLU, "elu": torch.nn.ELU, "swish": torch.nn.Hardswish}
 
     def __init__(self, dim, state_dict=None, layer_structure=None, add_layer_norm=False, activation_func=None):
         super().__init__()
 
         assert layer_structure is not None, "layer_structure must not be None"
         assert layer_structure[0] == 1, "Multiplier Sequence should start with size 1!"
         assert layer_structure[-1] == 1, "Multiplier Sequence should end with size 1!"
 
         linears = []
         for i in range(len(layer_structure) - 1):
             linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))
-            if activation_func == "relu":
-                linears.append(torch.nn.ReLU())
-            if activation_func == "leakyrelu":
-                linears.append(torch.nn.LeakyReLU())
+            # if skip_first_layer because first parameters potentially contain negative values
+            # if i < 1: continue
+            if activation_func in HypernetworkModule.activation_dict:
+                linears.append(HypernetworkModule.activation_dict[activation_func]())
+            else:
+                print("Invalid key {} encountered as activation function!".format(activation_func))
+            # if use_dropout:
+            #     linears.append(torch.nn.Dropout(p=0.3))
             if add_layer_norm:
                 linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
```
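Note: the hunk above replaces per-name string checks with a class-level `activation_dict` lookup, so supporting another activation (for example `elu`, or `swish` mapped to `torch.nn.Hardswish`) only needs a new dict entry. The stand-alone sketch below illustrates that pattern; `build_linears` and the example `dim`/`layer_structure` values are hypothetical and not part of the commit.

```python
import torch

# Minimal sketch of the dict-based activation lookup, mirroring the pattern above.
# The function name and the example values below are illustrative, not from the commit.
activation_dict = {
    "relu": torch.nn.ReLU,
    "leakyrelu": torch.nn.LeakyReLU,
    "elu": torch.nn.ELU,
    "swish": torch.nn.Hardswish,
}

def build_linears(dim, layer_structure, activation_func=None, add_layer_norm=False):
    """Builds a Linear / activation / LayerNorm stack like HypernetworkModule.__init__."""
    linears = []
    for i in range(len(layer_structure) - 1):
        linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i + 1])))
        if activation_func in activation_dict:
            # Look the activation class up by name and instantiate it; adding a
            # new activation only requires a new dict entry, not a new branch.
            linears.append(activation_dict[activation_func]())
        else:
            print("Invalid key {} encountered as activation function!".format(activation_func))
        if add_layer_norm:
            linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i + 1])))
    return torch.nn.Sequential(*linears)

layers = build_linears(dim=320, layer_structure=[1, 2, 1], activation_func="swish")
print(layers)
```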
```diff
@@ -46,7 +52,7 @@ class HypernetworkModule(torch.nn.Module):
             self.load_state_dict(state_dict)
         else:
             for layer in self.linear:
-                if not "ReLU" in layer.__str__():
+                if isinstance(layer, torch.nn.Linear):
                     layer.weight.data.normal_(mean=0.0, std=0.01)
                     layer.bias.data.zero_()
```
```diff
@@ -74,7 +80,7 @@ class HypernetworkModule(torch.nn.Module):
     def trainables(self):
         layer_structure = []
         for layer in self.linear:
-            if not "ReLU" in layer.__str__():
+            if isinstance(layer, torch.nn.Linear):
                 layer_structure += [layer.weight, layer.bias]
         return layer_structure
```
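Note: switching the filter from `not "ReLU" in layer.__str__()` to `isinstance(layer, torch.nn.Linear)` matters once activations other than ReLU can appear in `self.linear`. A hypothetical stand-alone example (not from the commit) of the difference:

```python
import torch

# With a non-ReLU activation in the stack, the old string-based filter no longer
# excludes the activation layer, while the isinstance check selects exactly the
# layers that actually have weight/bias tensors.
layers = torch.nn.Sequential(
    torch.nn.Linear(8, 16),
    torch.nn.Hardswish(),   # "swish": its repr does not contain "ReLU"
    torch.nn.Linear(16, 8),
)

old_style = [layer for layer in layers if not "ReLU" in layer.__str__()]
new_style = [layer for layer in layers if isinstance(layer, torch.nn.Linear)]

print(len(old_style))  # 3 -- Hardswish slips through the string check
print(len(new_style))  # 2 -- only the Linear layers
```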
```diff
@@ -298,6 +304,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
         return hypernetwork, filename
 
     scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
+    # if optimizer == "AdamW": or else Adam / AdamW / SGD, etc...
     optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate)
 
     pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step)
```
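Note: the added comment hints that the optimizer could be made selectable instead of being hard-coded to `torch.optim.AdamW`; this commit does not implement that. A hypothetical sketch of what such a selection might look like (`make_optimizer`, `optimizer_dict`, and the example values are not from the repository):

```python
import torch

# Sketch of a name-to-class optimizer lookup, following the same dict-based
# pattern used for activations above. Purely illustrative.
optimizer_dict = {
    "AdamW": torch.optim.AdamW,
    "Adam": torch.optim.Adam,
    "SGD": torch.optim.SGD,
}

def make_optimizer(weights, learn_rate, optimizer_name="AdamW"):
    if optimizer_name not in optimizer_dict:
        raise ValueError(f"Unknown optimizer {optimizer_name!r}")
    return optimizer_dict[optimizer_name](weights, lr=learn_rate)

# Stand-in for the hypernetwork weights and scheduler learn rate.
weights = [torch.nn.Parameter(torch.zeros(4, 4))]
optimizer = make_optimizer(weights, learn_rate=5e-5, optimizer_name="AdamW")
print(type(optimizer).__name__)  # AdamW
```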