@@ -316,62 +316,6 @@ def forward(self, a):
         )
         self.assertTrue(isinstance(q_model._model, torch.jit.ScriptModule))
 
-    def test_tune_add(self):
-        class M(torch.nn.Module):
-            def __init__(self):
-                super().__init__()
-                self.conv = torch.nn.Conv2d(3, 1, 1)
-                self.linear = torch.nn.Linear(224 * 224, 5)
-
-            def forward(self, a):
-                x = self.conv(a)
-                x = x.view(1, -1)
-                x += x
-                x = self.linear(x)
-                return x
-
-        model = M()
-        from neural_compressor import PostTrainingQuantConfig, quantization
-
-        acc_lst = [1, 0.8, 1.1, 1.2]
-
-        def fake_eval(model):
-            res = acc_lst.pop(0)
-            return res
-
-        conf = PostTrainingQuantConfig(backend="ipex", quant_level=0)
-        calib_dataloader = Dataloader()
-        q_model = quantization.fit(model, conf, calib_dataloader=calib_dataloader, eval_func=fake_eval)
-        self.assertTrue(isinstance(q_model._model, torch.jit.ScriptModule))
-
-    def test_tune_add_with_recipe(self):
-        class M(torch.nn.Module):
-            def __init__(self):
-                super().__init__()
-                self.linear = torch.nn.Linear(224 * 224 * 3, 5)
-
-            def forward(self, x):
-                x += x
-                x = x.view(1, -1)
-                x = self.linear(x)
-                return x
-
-        model = M()
-        from neural_compressor import PostTrainingQuantConfig, quantization
-
-        acc_lst = [1, 0.8, 1.1, 1.2]
-
-        def fake_eval(model):
-            res = acc_lst.pop(0)
-            return res
-
-        conf = PostTrainingQuantConfig(
-            backend="ipex", quant_level=0, recipes={"smooth_quant": True, "smooth_quant_args": {"alpha": 0.5}}
-        )
-        calib_dataloader = Dataloader()
-        q_model = quantization.fit(model, conf, calib_dataloader=calib_dataloader, eval_func=fake_eval)
-        self.assertTrue(isinstance(q_model._model, torch.jit.ScriptModule))
-
     def test_tune_minmax_obs(self):
         class M(torch.nn.Module):
             def __init__(self):
@@ -527,67 +471,6 @@ def forward(self, a):
         )
         self.assertTrue(isinstance(q_model._model, torch.jit.ScriptModule))
 
-    def test_tune_add(self):
-        class M(torch.nn.Module):
-            def __init__(self):
-                super().__init__()
-                self.conv = torch.nn.Conv2d(3, 1, 1)
-                self.linear = torch.nn.Linear(224 * 224, 5)
-
-            def forward(self, a):
-                x = self.conv(a)
-                x = x.view(1, -1)
-                x += x
-                x = self.linear(x)
-                return x
-
-        model = M().to("xpu")
-        from neural_compressor import PostTrainingQuantConfig, quantization
-
-        acc_lst = [1, 0.8, 1.1, 1.2]
-
-        def fake_eval(model):
-            res = acc_lst.pop(0)
-            return res
-
-        conf = PostTrainingQuantConfig(backend="ipex", device="xpu", quant_level=0)
-        calib_dataloader = Dataloader(device="xpu")
-        q_model = quantization.fit(model, conf, calib_dataloader=calib_dataloader, eval_func=fake_eval)
-        self.assertTrue(isinstance(q_model._model, torch.jit.ScriptModule))
-
-    def test_tune_add_with_recipe(self):
-        class M(torch.nn.Module):
-            def __init__(self):
-                super().__init__()
-                self.conv = torch.nn.Conv2d(3, 1, 1)
-                self.linear = torch.nn.Linear(224 * 224, 5)
-
-            def forward(self, a):
-                x = self.conv(a)
-                x = x.view(1, -1)
-                x += x
-                x = self.linear(x)
-                return x
-
-        model = M().to("xpu")
-        from neural_compressor import PostTrainingQuantConfig, quantization
-
-        acc_lst = [1, 0.8, 1.1, 1.2]
-
-        def fake_eval(model):
-            res = acc_lst.pop(0)
-            return res
-
-        conf = PostTrainingQuantConfig(
-            backend="ipex",
-            device="xpu",
-            quant_level=0,
-            recipes={"smooth_quant": True, "smooth_quant_args": {"alpha": 0.5}},
-        )
-        calib_dataloader = Dataloader(device="xpu")
-        q_model = quantization.fit(model, conf, calib_dataloader=calib_dataloader, eval_func=fake_eval)
-        self.assertTrue(isinstance(q_model._model, torch.jit.ScriptModule))
-
 
 class TestMixedPrecision(unittest.TestCase):
     @classmethod
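For reference, the removed `test_tune_add` / `test_tune_add_with_recipe` cases all exercised the same pattern: a tiny conv+linear module with an in-place add, quantized under the conservative `quant_level=0` tuning strategy, with a fake evaluator whose falling-then-rising accuracies force the tuner past its first attempt. Below is a minimal standalone sketch of that pattern, assuming Intel Neural Compressor 2.x with `intel_extension_for_pytorch` installed for the `"ipex"` backend; the `Dataloader` class here is a hypothetical stand-in for the helper this test file defines at module level.

```python
import torch
from neural_compressor import PostTrainingQuantConfig, quantization


class Dataloader:
    """Stand-in calibration loader: yields random batches shaped like the model input."""

    batch_size = 1  # INC reads this attribute from calibration dataloaders

    def __iter__(self):
        for _ in range(2):
            yield torch.randn(1, 3, 224, 224)


class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 1, 1)
        self.linear = torch.nn.Linear(224 * 224, 5)

    def forward(self, a):
        x = self.conv(a)
        x = x.view(1, -1)
        x += x  # the in-place add the tuner has to handle
        return self.linear(x)


# The falling-then-rising scores make the first quantized config "fail"
# the accuracy goal, so the tuner tries further configs before accepting one.
acc_lst = [1, 0.8, 1.1, 1.2]

conf = PostTrainingQuantConfig(backend="ipex", quant_level=0)
q_model = quantization.fit(
    M(),
    conf,
    calib_dataloader=Dataloader(),
    eval_func=lambda model: acc_lst.pop(0),
)
# On success, the IPEX backend returns a TorchScript model:
assert isinstance(q_model._model, torch.jit.ScriptModule)
```

The recipe variant differs only in its config, adding `recipes={"smooth_quant": True, "smooth_quant_args": {"alpha": 0.5}}` to `PostTrainingQuantConfig`, and the XPU variants move the model and calibration data to `"xpu"` and pass `device="xpu"`.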