stable-diffusion-webui / Commits / 5b57f61b

Commit 5b57f61b authored Nov 21, 2022 by flamelaw

fix pin_memory with different latent sampling method
parent 2d22d72c
Showing 3 changed files with 24 additions and 11 deletions.
modules/hypernetworks/hypernetwork.py: +4 -1
modules/textual_inversion/dataset.py: +19 -4
modules/textual_inversion/textual_inversion.py: +1 -6
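Across the three files, the change threads the chosen latent sampling method from the two training entry points into PersonalizedDataLoader, which now selects a collate function per method; batches built for the "random" method additionally opt out of memory pinning, so enabling the pin-memory option no longer conflicts with that sampling path.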
modules/hypernetworks/hypernetwork.py
```diff
@@ -416,7 +416,10 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
     pin_memory = shared.opts.pin_memory

     ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method)
-    dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, batch_size=ds.batch_size, pin_memory=pin_memory)
+
+    latent_sampling_method = ds.latent_sampling_method
+
+    dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)

     if unload:
         shared.sd_model.cond_stage_model.to(devices.cpu)
```
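Two things happen at this call site: the effective latent_sampling_method is read back from the dataset (presumably because PersonalizedBase can adjust the requested method) and is then forwarded to the loader along with the user's pin-memory preference. As background, not part of the commit: in PyTorch, pin_memory=True makes the DataLoader stage each batch in page-locked host memory so host-to-device copies can run asynchronously. A minimal generic sketch of that mechanism, using stand-in data and device names:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

# Stand-ins for the repo's dataset and target device (assumptions, not repo code).
ds = TensorDataset(torch.randn(64, 4, 64, 64))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

dl = DataLoader(ds, batch_size=8, pin_memory=True)
for (latents,) in dl:
    # With a pinned source tensor, this copy can overlap with GPU compute.
    latents = latents.to(device, non_blocking=True)
```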
modules/textual_inversion/dataset.py
```diff
@@ -138,8 +138,11 @@ class PersonalizedBase(Dataset):
         return entry

 class PersonalizedDataLoader(DataLoader):
-    def __init__(self, *args, **kwargs):
-        super(PersonalizedDataLoader, self).__init__(shuffle=True, drop_last=True, *args, **kwargs)
-        self.collate_fn = collate_wrapper
+    def __init__(self, dataset, latent_sampling_method="once", batch_size=1, pin_memory=False):
+        super(PersonalizedDataLoader, self).__init__(dataset, shuffle=True, drop_last=True, batch_size=batch_size, pin_memory=pin_memory)
+        if latent_sampling_method == "random":
+            self.collate_fn = collate_wrapper_random
+        else:
+            self.collate_fn = collate_wrapper
```
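The rewritten constructor is where the fix lands: instead of forwarding *args/**kwargs blindly, the loader now names its arguments and picks its collate_fn by sampling method (assigning self.collate_fn after super().__init__() works because DataLoader does not freeze that attribute after construction). For readers unfamiliar with the hook, collate_fn receives the list of individual dataset entries making up one batch and may return any object, which is exactly what lets the BatchLoader class below exist. A self-contained sketch with made-up items, not the repo's entry type:

```python
import torch
from torch.utils.data import DataLoader

items = [{"latent": torch.randn(4, 8, 8)} for _ in range(16)]  # made-up entries

def collate(entries):
    # `entries` is the raw list of items for one batch; the return value is
    # whatever object the training loop should receive.
    return {"latents": torch.stack([e["latent"] for e in entries])}

dl = DataLoader(items, batch_size=4, collate_fn=collate)
print(next(iter(dl))["latents"].shape)  # torch.Size([4, 4, 8, 8])
```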
```diff
@@ -148,6 +151,8 @@ class BatchLoader:
         self.cond_text = [entry.cond_text for entry in data]
         self.cond = [entry.cond for entry in data]
         self.latent_sample = torch.stack([entry.latent_sample for entry in data]).squeeze(1)
         #self.emb_index = [entry.emb_index for entry in data]
         #print(self.latent_sample.device)

+    def pin_memory(self):
+        self.latent_sample = self.latent_sample.pin_memory()
```
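This addition is the heart of the mechanism. BatchLoader is a custom batch type, so the DataLoader's automatic pinning cannot recurse into it; PyTorch's documented contract is that a custom batch defines its own pin_memory() method, which the loader calls when pin_memory=True. Note that PyTorch uses the hook's return value as the batch, which is why the documented pattern returns self. A condensed version of the example from the PyTorch DataLoader docs:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

class CustomBatch:
    def __init__(self, data):
        inps, tgts = zip(*data)
        self.inp = torch.stack(inps)
        self.tgt = torch.stack(tgts)

    def pin_memory(self):
        # Called by the DataLoader when pin_memory=True; the return value
        # replaces the batch, so return self.
        self.inp = self.inp.pin_memory()
        self.tgt = self.tgt.pin_memory()
        return self

def collate_custom(batch):
    return CustomBatch(batch)

ds = TensorDataset(torch.randn(16, 3), torch.randn(16, 1))
dl = DataLoader(ds, batch_size=4, collate_fn=collate_custom, pin_memory=True)
```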
```diff
@@ -155,3 +160,13 @@ class BatchLoader:

 def collate_wrapper(batch):
     return BatchLoader(batch)
+
+class BatchLoaderRandom(BatchLoader):
+    def __init__(self, data):
+        super().__init__(data)
+
+    def pin_memory(self):
+        return self
+
+def collate_wrapper_random(batch):
+    return BatchLoaderRandom(batch)
\ No newline at end of file
```
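BatchLoaderRandom exists only to make pinning a no-op: it returns itself unchanged. The likely reason, given the commit message, is that only dense CPU tensors can be pinned; with the "random" sampling method the latents are plausibly sampled on the model's device at batch time, and calling pin_memory() on a CUDA tensor raises a RuntimeError. A tiny illustration of that constraint (requires a CUDA build):

```python
import torch

if torch.cuda.is_available():
    cpu_t = torch.zeros(2, 2)
    print(cpu_t.pin_memory().is_pinned())  # True: dense CPU tensors can be pinned

    gpu_t = torch.zeros(2, 2, device="cuda")
    # gpu_t.pin_memory() raises RuntimeError: CUDA tensors cannot be pinned
```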
modules/textual_inversion/textual_inversion.py
```diff
@@ -277,7 +277,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
     latent_sampling_method = ds.latent_sampling_method

-    dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, batch_size=ds.batch_size, pin_memory=False)
+    dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)

     if unload:
         shared.sd_model.first_stage_model.to(devices.cpu)
```
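The embedding trainer previously hard-coded pin_memory=False and never told the loader which sampling method was in use; both are now forwarded, mirroring the hypernetwork change above. Downstream, the training loop would consume such a batch with something like the following; this is an assumed shape, not code from this commit:

```python
# Assumed consumption pattern; dl, devices, and pin_memory come from the code above.
for batch in dl:
    # non_blocking only pays off when the source tensor is pinned.
    x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
```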
```diff
@@ -333,11 +333,6 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
                 # go back until we reach gradient accumulation steps
                 if (j + 1) % gradient_step != 0:
                     continue
-                #print(f"grad:{embedding.vec.grad.detach().cpu().abs().mean().item():.7f}")
-                #scaler.unscale_(optimizer)
-                #print(f"grad:{embedding.vec.grad.detach().cpu().abs().mean().item():.7f}")
-                #torch.nn.utils.clip_grad_norm_(embedding.vec, max_norm=1.0)
-                #print(f"grad:{embedding.vec.grad.detach().cpu().abs().mean().item():.7f}")
                 scaler.step(optimizer)
                 scaler.update()
                 embedding.step += 1
```
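The deleted lines were commented-out debugging probes (gradient-norm prints and an unscale/clip experiment). What remains is standard AMP gradient accumulation: skip the optimizer until gradient_step micro-batches have contributed gradients, then step and update the scaler. A self-contained sketch of that pattern with a stand-in model and data (requires CUDA; not the repo's loop):

```python
import torch

model = torch.nn.Linear(8, 1).cuda()            # stand-in trainable weights
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scaler = torch.cuda.amp.GradScaler()
gradient_step = 4                               # micro-batches per optimizer step

for j in range(16):
    x = torch.randn(2, 8, device="cuda")        # stand-in micro-batch
    with torch.autocast("cuda"):
        loss = model(x).pow(2).mean() / gradient_step  # average over the accumulation window
    scaler.scale(loss).backward()

    if (j + 1) % gradient_step != 0:            # same guard as the hunk above
        continue

    scaler.step(optimizer)
    scaler.update()
    optimizer.zero_grad(set_to_none=True)
```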