Commit ab05a74e authored by Muhammad Rizqi Nur

Revert "Add cleanup after training"

This reverts commit 3ce2bfdf.
parent a27d19de
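
The change being reverted had wrapped both training loops (train_hypernetwork and train_embedding) in try/finally blocks so that cleanup ran even when training stopped early: the trained tensors were frozen again and, when the unload option was set, the offloaded model stages were moved back to the GPU. A minimal, self-contained sketch of that pattern follows; train_loop, train_with_cleanup, and the weights/sd_model arguments are illustrative stand-ins, not the repository's actual API.

import torch

def train_loop(weights):
    for _ in range(3):                      # stand-in for the real optimizer steps
        loss = sum((w * w).sum() for w in weights)
        loss.backward()

def train_with_cleanup(weights, sd_model=None, unload=False):
    # Illustrative version of the pattern this commit removes; the real code
    # lives in train_hypernetwork/train_embedding and uses shared.sd_model.
    for w in weights:
        w.requires_grad = True
    try:
        train_loop(weights)                 # may raise or be interrupted mid-run
    finally:
        # Reached on normal exit, exception, or Ctrl+C alike:
        for w in weights:
            w.requires_grad = False         # freeze the trained tensors again
        if unload and sd_model is not None:
            # Restore model stages that were offloaded to CPU to save VRAM.
            sd_model.cond_stage_model.to("cuda")
            sd_model.first_stage_model.to("cuda")

train_with_cleanup([torch.zeros(4, requires_grad=True)])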
modules/hypernetworks/hypernetwork.py
@@ -398,112 +398,110 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
     forced_filename = "<none>"

     pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step)
-
-    try:
-        for i, entries in pbar:
-            hypernetwork.step = i + ititial_step
-            if len(loss_dict) > 0:
-                previous_mean_losses = [i[-1] for i in loss_dict.values()]
-                previous_mean_loss = mean(previous_mean_losses)
-
-            scheduler.apply(optimizer, hypernetwork.step)
-            if scheduler.finished:
-                break
-
-            if shared.state.interrupted:
-                break
-
-            with torch.autocast("cuda"):
-                c = stack_conds([entry.cond for entry in entries]).to(devices.device)
-                # c = torch.vstack([entry.cond for entry in entries]).to(devices.device)
-                x = torch.stack([entry.latent for entry in entries]).to(devices.device)
-                loss = shared.sd_model(x, c)[0]
-                del x
-                del c
-
-                losses[hypernetwork.step % losses.shape[0]] = loss.item()
-                for entry in entries:
-                    loss_dict[entry.filename].append(loss.item())
-
-                optimizer.zero_grad()
-                weights[0].grad = None
-                loss.backward()
-
-                if weights[0].grad is None:
-                    steps_without_grad += 1
-                else:
-                    steps_without_grad = 0
-                assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue'
-
-                optimizer.step()
-
-            steps_done = hypernetwork.step + 1
-
-            if torch.isnan(losses[hypernetwork.step % losses.shape[0]]):
-                raise RuntimeError("Loss diverged.")
-
-            if len(previous_mean_losses) > 1:
-                std = stdev(previous_mean_losses)
-            else:
-                std = 0
-            dataset_loss_info = f"dataset loss:{mean(previous_mean_losses):.3f}" + u"\u00B1" + f"({std / (len(previous_mean_losses) ** 0.5):.3f})"
-            pbar.set_description(dataset_loss_info)
-
-            if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0:
-                # Before saving, change name to match current checkpoint.
-                hypernetwork.name = f'{hypernetwork_name}-{steps_done}'
-                last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork.name}.pt')
-                hypernetwork.save(last_saved_file)
-
-            textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), {
-                "loss": f"{previous_mean_loss:.7f}",
-                "learn_rate": scheduler.learn_rate
-            })
-
-            if images_dir is not None and steps_done % create_image_every == 0:
-                forced_filename = f'{hypernetwork_name}-{steps_done}'
-                last_saved_image = os.path.join(images_dir, forced_filename)
-
-                optimizer.zero_grad()
-                shared.sd_model.cond_stage_model.to(devices.device)
-                shared.sd_model.first_stage_model.to(devices.device)
-
-                p = processing.StableDiffusionProcessingTxt2Img(
-                    sd_model=shared.sd_model,
-                    do_not_save_grid=True,
-                    do_not_save_samples=True,
-                )
-
-                if preview_from_txt2img:
-                    p.prompt = preview_prompt
-                    p.negative_prompt = preview_negative_prompt
-                    p.steps = preview_steps
-                    p.sampler_index = preview_sampler_index
-                    p.cfg_scale = preview_cfg_scale
-                    p.seed = preview_seed
-                    p.width = preview_width
-                    p.height = preview_height
-                else:
-                    p.prompt = entries[0].cond_text
-                    p.steps = 20
-
-                preview_text = p.prompt
-
-                processed = processing.process_images(p)
-                image = processed.images[0] if len(processed.images)>0 else None
-
-                if unload:
-                    shared.sd_model.cond_stage_model.to(devices.cpu)
-                    shared.sd_model.first_stage_model.to(devices.cpu)
-
-                if image is not None:
-                    shared.state.current_image = image
-                    last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
-                    last_saved_image += f", prompt: {preview_text}"
-
-            shared.state.job_no = hypernetwork.step
-
-            shared.state.textinfo = f"""
+    for i, entries in pbar:
+        hypernetwork.step = i + ititial_step
+        if len(loss_dict) > 0:
+            previous_mean_losses = [i[-1] for i in loss_dict.values()]
+            previous_mean_loss = mean(previous_mean_losses)
+
+        scheduler.apply(optimizer, hypernetwork.step)
+        if scheduler.finished:
+            break
+
+        if shared.state.interrupted:
+            break
+
+        with torch.autocast("cuda"):
+            c = stack_conds([entry.cond for entry in entries]).to(devices.device)
+            # c = torch.vstack([entry.cond for entry in entries]).to(devices.device)
+            x = torch.stack([entry.latent for entry in entries]).to(devices.device)
+            loss = shared.sd_model(x, c)[0]
+            del x
+            del c
+
+            losses[hypernetwork.step % losses.shape[0]] = loss.item()
+            for entry in entries:
+                loss_dict[entry.filename].append(loss.item())
+
+            optimizer.zero_grad()
+            weights[0].grad = None
+            loss.backward()
+
+            if weights[0].grad is None:
+                steps_without_grad += 1
+            else:
+                steps_without_grad = 0
+            assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue'
+
+            optimizer.step()
+
+        steps_done = hypernetwork.step + 1
+
+        if torch.isnan(losses[hypernetwork.step % losses.shape[0]]):
+            raise RuntimeError("Loss diverged.")
+
+        if len(previous_mean_losses) > 1:
+            std = stdev(previous_mean_losses)
+        else:
+            std = 0
+        dataset_loss_info = f"dataset loss:{mean(previous_mean_losses):.3f}" + u"\u00B1" + f"({std / (len(previous_mean_losses) ** 0.5):.3f})"
+        pbar.set_description(dataset_loss_info)
+
+        if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0:
+            # Before saving, change name to match current checkpoint.
+            hypernetwork.name = f'{hypernetwork_name}-{steps_done}'
+            last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork.name}.pt')
+            hypernetwork.save(last_saved_file)
+
+        textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), {
+            "loss": f"{previous_mean_loss:.7f}",
+            "learn_rate": scheduler.learn_rate
+        })
+
+        if images_dir is not None and steps_done % create_image_every == 0:
+            forced_filename = f'{hypernetwork_name}-{steps_done}'
+            last_saved_image = os.path.join(images_dir, forced_filename)
+
+            optimizer.zero_grad()
+            shared.sd_model.cond_stage_model.to(devices.device)
+            shared.sd_model.first_stage_model.to(devices.device)
+
+            p = processing.StableDiffusionProcessingTxt2Img(
+                sd_model=shared.sd_model,
+                do_not_save_grid=True,
+                do_not_save_samples=True,
+            )
+
+            if preview_from_txt2img:
+                p.prompt = preview_prompt
+                p.negative_prompt = preview_negative_prompt
+                p.steps = preview_steps
+                p.sampler_index = preview_sampler_index
+                p.cfg_scale = preview_cfg_scale
+                p.seed = preview_seed
+                p.width = preview_width
+                p.height = preview_height
+            else:
+                p.prompt = entries[0].cond_text
+                p.steps = 20
+
+            preview_text = p.prompt
+
+            processed = processing.process_images(p)
+            image = processed.images[0] if len(processed.images)>0 else None
+
+            if unload:
+                shared.sd_model.cond_stage_model.to(devices.cpu)
+                shared.sd_model.first_stage_model.to(devices.cpu)
+
+            if image is not None:
+                shared.state.current_image = image
+                last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
+                last_saved_image += f", prompt: {preview_text}"
+
+        shared.state.job_no = hypernetwork.step
+
+        shared.state.textinfo = f"""
 <p>
 Loss: {previous_mean_loss:.7f}<br/>
 Step: {hypernetwork.step}<br/>
@@ -512,14 +510,7 @@ Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
 Last saved image: {html.escape(last_saved_image)}<br/>
 </p>
 """
-    finally:
-        if weights:
-            for weight in weights:
-                weight.requires_grad = False
-        if unload:
-            shared.sd_model.cond_stage_model.to(devices.device)
-            shared.sd_model.first_stage_model.to(devices.device)

     report_statistics(loss_dict)

     checkpoint = sd_models.select_checkpoint()
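
In the hunk above, the progress bar reports the dataset loss as the mean of each image's most recent loss, plus the standard error of that mean (std / sqrt(n)); the \u00B1 literal is the ± sign. A self-contained sketch of that computation, with hypothetical filenames and loss values standing in for the real loss_dict:

from statistics import mean, stdev

loss_dict = {  # per-image loss history, as accumulated in the loop above
    "img_a.png": [0.21, 0.18],
    "img_b.png": [0.25, 0.19],
    "img_c.png": [0.30, 0.24],
}

# Most recent loss per image, mirroring `[i[-1] for i in loss_dict.values()]`.
previous_mean_losses = [history[-1] for history in loss_dict.values()]

std = stdev(previous_mean_losses) if len(previous_mean_losses) > 1 else 0
# Standard error of the mean: sigma / sqrt(n), as in the pbar description.
info = f"dataset loss:{mean(previous_mean_losses):.3f}\u00b1({std / (len(previous_mean_losses) ** 0.5):.3f})"
print(info)  # dataset loss:0.203±(0.019)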
modules/textual_inversion/textual_inversion.py
@@ -283,113 +283,111 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
     embedding_yet_to_be_embedded = False

     pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step)
-
-    try:
-        for i, entries in pbar:
-            embedding.step = i + ititial_step
-
-            scheduler.apply(optimizer, embedding.step)
-            if scheduler.finished:
-                break
-
-            if shared.state.interrupted:
-                break
-
-            with torch.autocast("cuda"):
-                c = cond_model([entry.cond_text for entry in entries])
-                x = torch.stack([entry.latent for entry in entries]).to(devices.device)
-                loss = shared.sd_model(x, c)[0]
-                del x
-
-                losses[embedding.step % losses.shape[0]] = loss.item()
-
-                optimizer.zero_grad()
-                loss.backward()
-                optimizer.step()
-
-            steps_done = embedding.step + 1
-
-            epoch_num = embedding.step // len(ds)
-            epoch_step = embedding.step % len(ds)
-
-            pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{len(ds)}]loss: {losses.mean():.7f}")
-
-            if embedding_dir is not None and steps_done % save_embedding_every == 0:
-                # Before saving, change name to match current checkpoint.
-                embedding.name = f'{embedding_name}-{steps_done}'
-                last_saved_file = os.path.join(embedding_dir, f'{embedding.name}.pt')
-                embedding.save(last_saved_file)
-                embedding_yet_to_be_embedded = True
-
-            write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), {
-                "loss": f"{losses.mean():.7f}",
-                "learn_rate": scheduler.learn_rate
-            })
-
-            if images_dir is not None and steps_done % create_image_every == 0:
-                forced_filename = f'{embedding_name}-{steps_done}'
-                last_saved_image = os.path.join(images_dir, forced_filename)
-                p = processing.StableDiffusionProcessingTxt2Img(
-                    sd_model=shared.sd_model,
-                    do_not_save_grid=True,
-                    do_not_save_samples=True,
-                    do_not_reload_embeddings=True,
-                )
-
-                if preview_from_txt2img:
-                    p.prompt = preview_prompt
-                    p.negative_prompt = preview_negative_prompt
-                    p.steps = preview_steps
-                    p.sampler_index = preview_sampler_index
-                    p.cfg_scale = preview_cfg_scale
-                    p.seed = preview_seed
-                    p.width = preview_width
-                    p.height = preview_height
-                else:
-                    p.prompt = entries[0].cond_text
-                    p.steps = 20
-                    p.width = training_width
-                    p.height = training_height
-
-                preview_text = p.prompt
-
-                processed = processing.process_images(p)
-                image = processed.images[0]
-
-                shared.state.current_image = image
-
-                if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded:
-
-                    last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{steps_done}.png')
-
-                    info = PngImagePlugin.PngInfo()
-                    data = torch.load(last_saved_file)
-                    info.add_text("sd-ti-embedding", embedding_to_b64(data))
-
-                    title = "<{}>".format(data.get('name', '???'))
-
-                    try:
-                        vectorSize = list(data['string_to_param'].values())[0].shape[0]
-                    except Exception as e:
-                        vectorSize = '?'
-
-                    checkpoint = sd_models.select_checkpoint()
-                    footer_left = checkpoint.model_name
-                    footer_mid = '[{}]'.format(checkpoint.hash)
-                    footer_right = '{}v {}s'.format(vectorSize, steps_done)
-
-                    captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right)
-                    captioned_image = insert_image_data_embed(captioned_image, data)
-
-                    captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info)
-                    embedding_yet_to_be_embedded = False
-
-                last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
-                last_saved_image += f", prompt: {preview_text}"
-
-            shared.state.job_no = embedding.step
-
-            shared.state.textinfo = f"""
+    for i, entries in pbar:
+        embedding.step = i + ititial_step
+
+        scheduler.apply(optimizer, embedding.step)
+        if scheduler.finished:
+            break
+
+        if shared.state.interrupted:
+            break
+
+        with torch.autocast("cuda"):
+            c = cond_model([entry.cond_text for entry in entries])
+            x = torch.stack([entry.latent for entry in entries]).to(devices.device)
+            loss = shared.sd_model(x, c)[0]
+            del x
+
+            losses[embedding.step % losses.shape[0]] = loss.item()
+
+            optimizer.zero_grad()
+            loss.backward()
+            optimizer.step()
+
+        steps_done = embedding.step + 1
+
+        epoch_num = embedding.step // len(ds)
+        epoch_step = embedding.step % len(ds)
+
+        pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{len(ds)}]loss: {losses.mean():.7f}")
+
+        if embedding_dir is not None and steps_done % save_embedding_every == 0:
+            # Before saving, change name to match current checkpoint.
+            embedding.name = f'{embedding_name}-{steps_done}'
+            last_saved_file = os.path.join(embedding_dir, f'{embedding.name}.pt')
+            embedding.save(last_saved_file)
+            embedding_yet_to_be_embedded = True
+
+        write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), {
+            "loss": f"{losses.mean():.7f}",
+            "learn_rate": scheduler.learn_rate
+        })
+
+        if images_dir is not None and steps_done % create_image_every == 0:
+            forced_filename = f'{embedding_name}-{steps_done}'
+            last_saved_image = os.path.join(images_dir, forced_filename)
+            p = processing.StableDiffusionProcessingTxt2Img(
+                sd_model=shared.sd_model,
+                do_not_save_grid=True,
+                do_not_save_samples=True,
+                do_not_reload_embeddings=True,
+            )
+
+            if preview_from_txt2img:
+                p.prompt = preview_prompt
+                p.negative_prompt = preview_negative_prompt
+                p.steps = preview_steps
+                p.sampler_index = preview_sampler_index
+                p.cfg_scale = preview_cfg_scale
+                p.seed = preview_seed
+                p.width = preview_width
+                p.height = preview_height
+            else:
+                p.prompt = entries[0].cond_text
+                p.steps = 20
+                p.width = training_width
+                p.height = training_height
+
+            preview_text = p.prompt
+
+            processed = processing.process_images(p)
+            image = processed.images[0]
+
+            shared.state.current_image = image
+
+            if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded:
+
+                last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{steps_done}.png')
+
+                info = PngImagePlugin.PngInfo()
+                data = torch.load(last_saved_file)
+                info.add_text("sd-ti-embedding", embedding_to_b64(data))
+
+                title = "<{}>".format(data.get('name', '???'))
+
+                try:
+                    vectorSize = list(data['string_to_param'].values())[0].shape[0]
+                except Exception as e:
+                    vectorSize = '?'
+
+                checkpoint = sd_models.select_checkpoint()
+                footer_left = checkpoint.model_name
+                footer_mid = '[{}]'.format(checkpoint.hash)
+                footer_right = '{}v {}s'.format(vectorSize, steps_done)
+
+                captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right)
+                captioned_image = insert_image_data_embed(captioned_image, data)
+
+                captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info)
+                embedding_yet_to_be_embedded = False
+
+            last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
+            last_saved_image += f", prompt: {preview_text}"
+
+        shared.state.job_no = embedding.step
+
+        shared.state.textinfo = f"""
 <p>
 Loss: {losses.mean():.7f}<br/>
 Step: {embedding.step}<br/>
@@ -398,9 +396,6 @@ Last saved embedding: {html.escape(last_saved_file)}<br/>
 Last saved image: {html.escape(last_saved_image)}<br/>
 </p>
 """
-    finally:
-        if embedding and embedding.vec is not None:
-            embedding.vec.requires_grad = False

     checkpoint = sd_models.select_checkpoint()
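
With the finally block gone from train_embedding as well, nothing resets embedding.vec.requires_grad once the loop exits, so the embedding tensor is simply left trainable afterwards. A toy illustration of the behavioural difference, with a plain tensor standing in for embedding.vec:

import torch

vec = torch.zeros(2, 768, requires_grad=True)   # stand-in for embedding.vec

def run_training(freeze_after):
    try:
        pass                                     # training steps would go here
    finally:
        if freeze_after:                         # pre-revert behaviour
            vec.requires_grad = False

run_training(freeze_after=False)                 # post-revert behaviour
print(vec.requires_grad)                         # True: the vector stays trainable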