Skip to content

Commit 9758156

Browse files
committed
fix: remove linter errors
1 parent d84db51 commit 9758156

1 file changed

Lines changed: 8 additions & 9 deletions

File tree

DPF/filters/videos/lita_filter.py

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,6 @@
2323
)
2424
from transformers import (
2525
AutoConfig,
26-
AutoModelForCausalLM,
2726
AutoTokenizer,
2827
BitsAndBytesConfig,
2928
)
@@ -64,11 +63,11 @@ def load_pretrained_model(model_path: str,
6463
kwargs['torch_dtype'] = torch.float16 # type: ignore
6564

6665
if 'lita' not in model_name.lower():
67-
warnings.warn("this function is for loading LITA models")
66+
warnings.warn("this function is for loading LITA models", stacklevel=2)
6867
if 'lora' in model_name.lower():
69-
warnings.warn("lora is currently not supported for LITA")
68+
warnings.warn("lora is currently not supported for LITA", stacklevel=2)
7069
if 'mpt' in model_name.lower():
71-
warnings.warn("mpt is currently not supported for LITA")
70+
warnings.warn("mpt is currently not supported for LITA", stacklevel=2)
7271

7372
if model_base is not None:
7473
print('Loading LITA from base model...')
@@ -107,26 +106,26 @@ def load_pretrained_model(model_path: str,
107106
assert num_new_tokens == 0, "time tokens should already be in the tokenizer for full finetune model"
108107

109108
if num_new_tokens > 0:
110-
warnings.warn("looking for weights in mm_projector.bin")
109+
warnings.warn("looking for weights in mm_projector.bin", stacklevel=2)
111110
assert num_new_tokens == num_time_tokens
112111
model.resize_token_embeddings(len(tokenizer))
113112
input_embeddings = model.get_input_embeddings().weight.data
114113
output_embeddings = model.get_output_embeddings().weight.data
115114
weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu')
116115
assert 'model.embed_tokens.weight' in weights and 'lm_head.weight' in weights
117-
116+
118117
dtype = input_embeddings.dtype
119118
device = input_embeddings.device
120-
119+
121120
tokenizer_time_token_ids = tokenizer.convert_tokens_to_ids(time_tokens)
122121
time_token_ids = getattr(model.config, 'time_token_ids', tokenizer_time_token_ids)
123122
input_embeddings[tokenizer_time_token_ids] = weights['model.embed_tokens.weight'][time_token_ids].to(dtype).to(device)
124123
output_embeddings[tokenizer_time_token_ids] = weights['lm_head.weight'][time_token_ids].to(dtype).to(device)
125-
124+
126125
if hasattr(model.config, "max_sequence_length"):
127126
context_len = model.config.max_sequence_length
128127
else:
129-
context_len = 2048
128+
context_len = 2048
130129
return tokenizer, model, image_processor, context_len
131130

132131

0 commit comments

Comments (0)