This repository was archived by the owner on Oct 25, 2024. It is now read-only.

ZeroDivisionError: division by zero #762

@ei-grad

Description

Python 3.10.11 (main, Apr 20 2023, 19:02:41) [GCC 11.2.0]
Type 'copyright', 'credits' or 'license' for more information
IPython 8.17.2 -- An enhanced Interactive Python. Type '?' for help.

In [1]: from transformers import AutoTokenizer, TextStreamer
   ...: from intel_extension_for_transformers.transformers import AutoModelForCausalLM
   ...: 
   ...: model_name = "Intel/neural-chat-7b-v1-1"     # Hugging Face model_id or local model
   ...: prompt = "Once upon a time, there existed a little girl,"
   ...: 
   ...: tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
   ...: inputs = tokenizer(prompt, return_tensors="pt").input_ids
   ...: streamer = TextStreamer(tokenizer)
   ...: 
   ...: model = AutoModelForCausalLM.from_pretrained(model_name, load_in_4bit=True)
   ...: outputs = model.generate(inputs, streamer=streamer, max_new_tokens=300)
None of PyTorch, TensorFlow >= 2.0, or Flax have been found. Models won't be available and only tokenizers, configuration and file/data utilities can be used.

---------------------------------------------------------------------------
ZeroDivisionError                         Traceback (most recent call last)
Cell In[1], line 2
      1 from transformers import AutoTokenizer, TextStreamer
----> 2 from intel_extension_for_transformers.transformers import AutoModelForCausalLM
      3 model_name = "Intel/neural-chat-7b-v1-1"     # Hugging Face model_id or local model
      4 prompt = "Once upon a time, there existed a little girl,"

File ~/.virtualenvs/intel-transformers/lib/python3.10/site-packages/intel_extension_for_transformers/transformers/__init__.py:19
      1 #!/usr/bin/env python
      2 # -*- coding: utf-8 -*-
      3 #
   (...)
     15 # See the License for the specific language governing permissions and
     16 # limitations under the License.
---> 19 from .config import (
     20     WEIGHTS_NAME,
     21     AutoDistillationConfig,
     22     BenchmarkConfig,
     23     DistillationConfig,
     24     DynamicLengthConfig,
     25     FlashDistillationConfig,
     26     NASConfig,
     27     Provider,
     28     PrunerV2,
     29     PruningConfig,
     30     QuantizationConfig,
     31     TFDistillationConfig,
     32 )
     33 from .distillation import (
     34     SUPPORTED_DISTILLATION_CRITERION_MODE,
     35     DistillationCriterionMode,
     36 )
     37 from .mixture.auto_distillation import AutoDistillation

File ~/.virtualenvs/intel-transformers/lib/python3.10/site-packages/intel_extension_for_transformers/transformers/config.py:22
     20 import yaml
     21 from enum import Enum
---> 22 from neural_compressor.conf.config import (
     23     Distillation_Conf, Pruner, Pruning_Conf, Quantization_Conf
     24 )
     25 from neural_compressor.conf.dotdict import DotDict, deep_set
     26 from .utils.metrics import Metric

File ~/.virtualenvs/intel-transformers/lib/python3.10/site-packages/neural_compressor/__init__.py:28
     20 # we need to set a global 'NA' backend, or Model can't be used
     21 from .config import (
     22     DistillationConfig,
     23     PostTrainingQuantConfig,
   (...)
     26     MixedPrecisionConfig,
     27 )
---> 28 from .contrib import *
     29 from .model import *
     30 from .metric import *

File ~/.virtualenvs/intel-transformers/lib/python3.10/site-packages/neural_compressor/contrib/__init__.py:18
      1 #!/usr/bin/env python
      2 # -*- coding: utf-8 -*-
      3 #
   (...)
     15 # See the License for the specific language governing permissions and
     16 # limitations under the License.
     17 """Built-in strategy for multiple framework backends."""
---> 18 from .strategy import *

File ~/.virtualenvs/intel-transformers/lib/python3.10/site-packages/neural_compressor/contrib/strategy/__init__.py:25
     23 for f in modules:
     24     if isfile(f) and not f.startswith("__") and not f.endswith("__init__.py"):
---> 25         __import__(basename(f)[:-3], globals(), locals(), level=1)

File ~/.virtualenvs/intel-transformers/lib/python3.10/site-packages/neural_compressor/contrib/strategy/tpe.py:26
     22 from pathlib import Path
     24 import numpy as np
---> 26 from neural_compressor.strategy.strategy import TuneStrategy, strategy_registry
     27 from neural_compressor.strategy.utils.tuning_sampler import OpWiseTuningSampler
     28 from neural_compressor.strategy.utils.tuning_structs import OpTuningConfig

File ~/.virtualenvs/intel-transformers/lib/python3.10/site-packages/neural_compressor/strategy/__init__.py:19
      1 #!/usr/bin/env python
      2 # -*- coding: utf-8 -*-
      3 #
   (...)
     15 # See the License for the specific language governing permissions and
     16 # limitations under the License.
     17 """Intel Neural Compressor Strategy."""
---> 19 from .strategy import STRATEGIES
     20 from os.path import dirname, basename, isfile, join
     21 import glob

File ~/.virtualenvs/intel-transformers/lib/python3.10/site-packages/neural_compressor/strategy/strategy.py:35
     32 import numpy as np
     33 import yaml
---> 35 from neural_compressor.adaptor.tensorflow import TensorFlowAdaptor
     37 from ..adaptor import FRAMEWORKS
     38 from ..algorithm import ALGORITHMS, AlgorithmScheduler

File ~/.virtualenvs/intel-transformers/lib/python3.10/site-packages/neural_compressor/adaptor/__init__.py:26
     24 for f in modules:
     25     if isfile(f) and not f.startswith("__") and not f.endswith("__init__.py"):
---> 26         __import__(basename(f)[:-3], globals(), locals(), level=1)
     28 __all__ = ["FRAMEWORKS"]

File ~/.virtualenvs/intel-transformers/lib/python3.10/site-packages/neural_compressor/adaptor/pytorch.py:43
     40 torch_utils = LazyImport("neural_compressor.adaptor.torch_utils")
     41 ipex = LazyImport("intel_extension_for_pytorch")
---> 43 REDUCE_RANGE = False if CpuInfo().vnni else True
     44 logger.debug("Reduce range is {}".format(str(REDUCE_RANGE)))
     47 def get_torch_version():

File ~/.virtualenvs/intel-transformers/lib/python3.10/site-packages/neural_compressor/utils/utility.py:129, in singleton.<locals>._singleton(*args, **kw)
    127 """Create a singleton object."""
    128 if cls not in instances:
--> 129     instances[cls] = cls(*args, **kw)
    130 return instances[cls]

File ~/.virtualenvs/intel-transformers/lib/python3.10/site-packages/neural_compressor/utils/utility.py:254, in CpuInfo.__init__(self)
    252 self._sockets = self.get_number_of_sockets()
    253 self._cores = psutil.cpu_count(logical=False)
--> 254 self._cores_per_socket = int(self._cores / self._sockets)

ZeroDivisionError: division by zero
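
For context on what the traceback shows: neural_compressor.adaptor.pytorch instantiates CpuInfo() at import time (to set REDUCE_RANGE), and CpuInfo.__init__ divides the physical core count by the detected socket count. The error means socket detection returned 0 in this environment (this can happen in some containers or VMs). Below is a minimal sketch of the failing arithmetic and a guarded variant; it assumes psutil is available, and _detect_sockets() is a hypothetical stand-in for whatever CpuInfo.get_number_of_sockets() does, not the library's actual code.

import psutil


def _detect_sockets() -> int:
    # Hypothetical stand-in for CpuInfo.get_number_of_sockets();
    # in the failing environment the real detection evidently returned 0.
    return 0


def cores_per_socket_unguarded() -> int:
    # Mirrors the arithmetic from CpuInfo.__init__ shown in the traceback.
    cores = psutil.cpu_count(logical=False)
    sockets = _detect_sockets()
    return int(cores / sockets)  # ZeroDivisionError when sockets == 0


def cores_per_socket_guarded() -> int:
    # Same calculation with a fallback: treat failed detection as one socket.
    cores = psutil.cpu_count(logical=False) or 1
    sockets = _detect_sockets() or 1
    return int(cores / sockets)


print(cores_per_socket_guarded())    # works even when socket detection yields 0
print(cores_per_socket_unguarded())  # raises ZeroDivisionError, as in the report

On Linux, running lscpu (or counting distinct "physical id" entries in /proc/cpuinfo) in the affected environment should show whether socket information is exposed at all; in some containerized or virtualized setups it is not.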
