Skip to content

Commit 12769b3

Browse files
committed
debugging why it doesn't work
1 parent b8e1151 commit 12769b3

File tree

2 files changed

+6
-3
lines changed

2 files changed

+6
-3
lines changed

ldm/modules/embedding_manager.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -140,6 +140,8 @@ def forward(
140140
tokenized_text,
141141
embedded_text,
142142
):
143+
# torch.save(embedded_text, '/tmp/embedding-manager-uglysonic-pre-rewrite.pt')
144+
143145
b, n, device = *tokenized_text.shape, tokenized_text.device
144146

145147
for (
@@ -241,7 +243,7 @@ def load(self, ckpt_paths, full=True):
241243
# both will be stored in this dictionary
242244
for term in self.string_to_param_dict.keys():
243245
term = term.strip('<').strip('>')
244-
self.concepts_loaded[term] = True
246+
self.concepts_loaded[term] = True
245247
print(f'>> Current embedding manager terms: {", ".join(self.string_to_param_dict.keys())}')
246248

247249
def _expand_directories(self, paths:list[str]):
@@ -262,7 +264,7 @@ def _load(self, ckpt_path, full=True):
262264
print(f'\n### Security Issues Found in Model: {scan_result.issues_count}')
263265
print('### For your safety, InvokeAI will not load this embed.')
264266
return
265-
267+
266268
ckpt = torch.load(ckpt_path, map_location='cpu')
267269

268270
# Handle .pt textual inversion files

ldm/modules/encoders/modules.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -548,7 +548,7 @@ def forward(self, text: list, **kwargs):
548548

549549
#print(f"assembled tokens for '{fragments}' into tensor of shape {lerped_embeddings.shape}")
550550

551-
# append to batch
551+
# append to batch
552552
batch_z = lerped_embeddings.unsqueeze(0) if batch_z is None else torch.cat([batch_z, lerped_embeddings.unsqueeze(0)], dim=1)
553553
batch_tokens = tokens.unsqueeze(0) if batch_tokens is None else torch.cat([batch_tokens, tokens.unsqueeze(0)], dim=1)
554554

@@ -647,6 +647,7 @@ def build_weighted_embedding_tensor(self, tokens: torch.Tensor, per_token_weight
647647
'''
648648
#print(f"building weighted embedding tensor for {tokens} with weights {per_token_weights}")
649649
z = self.transformer(input_ids=tokens.unsqueeze(0), **kwargs)
650+
# torch.save(z, '/tmp/embedding-manager-uglysonic-post-rewrite.pt')
650651
batch_weights_expanded = per_token_weights.reshape(per_token_weights.shape + (1,)).expand(z.shape)
651652

652653
if weight_delta_from_empty:

0 commit comments

Comments (0)