How to load without lmdeploy?
#1 opened by matatonic
Is there an example for just transformers?
Thanks for this model, it's really great!
The official lmdeploy docker image is also failing to start this model: "ModuleNotFoundError: No module named 'timm'"
Try installing timm?
Like this:
pip install timm==0.9.12
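If the install succeeds but lmdeploy still reports the missing module, a quick sanity check from inside the same container/environment (just a suggestion) is:

python -c "import timm; print(timm.__version__)"

If that prints a version, the timm error should be gone and any remaining failure is something else.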
I did try that, I got new errors.
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
Traceback (most recent call last):
  File "/opt/py38/bin/lmdeploy", line 11, in <module>
    load_entry_point('lmdeploy', 'console_scripts', 'lmdeploy')()
  File "/opt/lmdeploy/lmdeploy/cli/entrypoint.py", line 37, in run
    args.run(args)
  File "/opt/lmdeploy/lmdeploy/cli/serve.py", line 303, in api_server
    run_api_server(args.model_path,
  File "/opt/lmdeploy/lmdeploy/serve/openai/api_server.py", line 1191, in serve
    VariableInterface.async_engine = pipeline_class(
  File "/opt/lmdeploy/lmdeploy/serve/vl_async_engine.py", line 21, in __init__
    super().__init__(model_path, **kwargs)
  File "/opt/lmdeploy/lmdeploy/serve/async_engine.py", line 206, in __init__
    self._build_turbomind(model_path=model_path,
  File "/opt/lmdeploy/lmdeploy/serve/async_engine.py", line 253, in _build_turbomind
    self.engine = tm.TurboMind.from_pretrained(
  File "/opt/lmdeploy/lmdeploy/turbomind/turbomind.py", line 387, in from_pretrained
    return cls(model_path=pretrained_model_name_or_path,
  File "/opt/lmdeploy/lmdeploy/turbomind/turbomind.py", line 161, in __init__
    self.model_comm = self._from_hf(model_source=model_source,
  File "/opt/lmdeploy/lmdeploy/turbomind/turbomind.py", line 270, in _from_hf
    output_model = OUTPUT_MODELS.get(output_format)(
  File "/opt/lmdeploy/lmdeploy/turbomind/deploy/target_model/fp.py", line 26, in __init__
    super().__init__(input_model, cfg, to_file, out_dir)
  File "/opt/lmdeploy/lmdeploy/turbomind/deploy/target_model/base.py", line 156, in __init__
    self.cfg = self.get_config(cfg)
  File "/opt/lmdeploy/lmdeploy/turbomind/deploy/target_model/fp.py", line 38, in get_config
    w1, _, _ = bin.ffn(i)
  File "/opt/lmdeploy/lmdeploy/turbomind/deploy/source_model/internlm2.py", line 69, in ffn
    return self._ffn(i, 'weight')
  File "/opt/lmdeploy/lmdeploy/turbomind/deploy/source_model/internlm2.py", line 62, in _ffn
    tensor = self.params[
KeyError: 'language_model.model.layers.0.feed_forward.w1.weight'
I would still also prefer to run it directly without lmdeploy if possible, thanks!
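(For reference, loading InternVL checkpoints directly with transformers usually looks like the sketch below; the model path is a placeholder and the pattern assumes the repo ships its custom modeling code via trust_remote_code. Whether it works for these AWQ weights depends on the reply further down.)

import torch
from transformers import AutoModel, AutoTokenizer

path = "path/to/model"  # placeholder: the HF repo id or a local directory
# InternVL ships custom modeling code, so trust_remote_code=True is required
model = AutoModel.from_pretrained(path, torch_dtype=torch.bfloat16, trust_remote_code=True).eval().cuda()
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)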
Can you provide more details including test code and environment configuration?
I would still also prefer to run it directly without lmdeploy if possible, thanks!
Our AWQ model was generated with lmdeploy. If you want to use the AutoAWQ integration in transformers, you need to make sure that the AutoAWQ library supports our model.
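For completeness, the supported route for these AWQ weights is lmdeploy's pipeline API; a minimal sketch (model and image paths are placeholders), assuming a recent lmdeploy build with the turbomind backend:

from lmdeploy import pipeline, TurbomindEngineConfig
from lmdeploy.vl import load_image

# model_format='awq' tells the turbomind backend the weights are AWQ-quantized
pipe = pipeline("path/to/awq/model",  # placeholder
                backend_config=TurbomindEngineConfig(model_format='awq'))
image = load_image("path/to/image.jpg")  # placeholder
print(pipe(("describe this image", image)))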
czczup changed discussion status to closed