/var/lib/workspace/prototype_source/gpu_quantization_torchao_tutorial.py failed leaving traceback:
Traceback (most recent call last):
File "/var/lib/workspace/prototype_source/gpu_quantization_torchao_tutorial.py", line 194, in <module>
quant_res = benchmark(model_c, image)
File "/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
File "/var/lib/workspace/prototype_source/gpu_quantization_torchao_tutorial.py", line 63, in benchmark
f(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/eval_frame.py", line 573, in _fn
return fn(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/convert_frame.py", line 1380, in __call__
return self._torchdynamo_orig_callable(
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/convert_frame.py", line 1164, in __call__
result = self._inner_convert(
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/convert_frame.py", line 547, in __call__
return _compile(
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/convert_frame.py", line 986, in _compile
guarded_code = compile_inner(code, one_graph, hooks, transform)
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/convert_frame.py", line 715, in compile_inner
return _compile_inner(code, one_graph, hooks, transform)
File "/usr/local/lib/python3.10/dist-packages/torch/_utils_internal.py", line 95, in wrapper_function
return function(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/convert_frame.py", line 750, in _compile_inner
out_code = transform_code_object(code, transform)
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/bytecode_transformation.py", line 1361, in transform_code_object
transformations(instructions, code_options)
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/convert_frame.py", line 231, in _fn
return fn(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/convert_frame.py", line 662, in transform
tracer.run()
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/symbolic_convert.py", line 2868, in run
super().run()
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/symbolic_convert.py", line 1052, in run
while self.step():
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/symbolic_convert.py", line 962, in step
self.dispatch_table[inst.opcode](self, inst)
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/symbolic_convert.py", line 3048, in RETURN_VALUE
self._return(inst)
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/symbolic_convert.py", line 3033, in _return
self.output.compile_subgraph(
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/output_graph.py", line 1101, in compile_subgraph
self.compile_and_call_fx_graph(
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/output_graph.py", line 1382, in compile_and_call_fx_graph
compiled_fn = self.call_user_compiler(gm)
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/output_graph.py", line 1432, in call_user_compiler
return self._call_user_compiler(gm)
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/output_graph.py", line 1483, in _call_user_compiler
raise BackendCompilerFailed(self.compiler_fn, e).with_traceback(
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/output_graph.py", line 1462, in _call_user_compiler
compiled_fn = compiler_fn(gm, self.example_inputs())
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/repro/after_dynamo.py", line 130, in __call__
compiled_gm = compiler_fn(gm, example_inputs)
File "/usr/local/lib/python3.10/dist-packages/torch/__init__.py", line 2314, in __call__
return compile_fx(model_, inputs_, config_patches=self.config)
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/compile_fx.py", line 1552, in compile_fx
return compile_fx(
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/compile_fx.py", line 1863, in compile_fx
return aot_autograd(
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/backends/common.py", line 83, in __call__
cg = aot_module_simplified(gm, example_inputs, **self.kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/_functorch/aot_autograd.py", line 1155, in aot_module_simplified
compiled_fn = dispatch_and_compile()
File "/usr/local/lib/python3.10/dist-packages/torch/_functorch/aot_autograd.py", line 1131, in dispatch_and_compile
compiled_fn, _ = create_aot_dispatcher_function(
File "/usr/local/lib/python3.10/dist-packages/torch/_functorch/aot_autograd.py", line 580, in create_aot_dispatcher_function
return _create_aot_dispatcher_function(
File "/usr/local/lib/python3.10/dist-packages/torch/_functorch/aot_autograd.py", line 830, in _create_aot_dispatcher_function
compiled_fn, fw_metadata = compiler_fn(
File "/usr/local/lib/python3.10/dist-packages/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py", line 203, in aot_dispatch_base
compiled_fw = compiler(fw_module, updated_flat_args)
File "/usr/local/lib/python3.10/dist-packages/torch/_functorch/aot_autograd.py", line 489, in __call__
return self.compiler_fn(gm, example_inputs)
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/compile_fx.py", line 1741, in fw_compiler_base
return inner_compile(
File "/usr/lib/python3.10/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/compile_fx.py", line 569, in compile_fx_inner
return wrap_compiler_debug(_compile_fx_inner, compiler_name="inductor")(
File "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/repro/after_aot.py", line 102, in debug_wrapper
inner_compiled_fn = compiler_fn(gm, example_inputs)
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/compile_fx.py", line 685, in _compile_fx_inner
mb_compiled_graph = fx_codegen_and_compile(
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/compile_fx.py", line 1129, in fx_codegen_and_compile
return scheme.codegen_and_compile(gm, example_inputs, inputs_to_check, graph_kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/compile_fx.py", line 1044, in codegen_and_compile
compiled_fn = graph.compile_to_module().call
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/graph.py", line 2027, in compile_to_module
return self._compile_to_module()
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/graph.py", line 2033, in _compile_to_module
self.codegen_with_cpp_wrapper() if self.cpp_wrapper else self.codegen()
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/graph.py", line 1964, in codegen
self.scheduler = Scheduler(self.operations)
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/scheduler.py", line 1798, in __init__
self._init(nodes)
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/scheduler.py", line 1870, in _init
self.nodes = self.fuse_nodes(self.nodes)
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/scheduler.py", line 2377, in fuse_nodes
nodes = self.fuse_nodes_once(nodes)
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/scheduler.py", line 2674, in fuse_nodes_once
if not self.speedup_by_fusion(node1, node2):
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/scheduler.py", line 2597, in speedup_by_fusion
ms_fused, _ = self.benchmark_fused_nodes(node_list_fused)
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/scheduler.py", line 2415, in benchmark_fused_nodes
return backend.benchmark_fused_nodes(nodes)
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/codegen/cuda_combined_scheduling.py", line 92, in benchmark_fused_nodes
return self._triton_scheduling.benchmark_fused_nodes(nodes)
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/codegen/triton.py", line 3628, in benchmark_fused_nodes
src_code = self.generate_kernel_code_from_nodes(
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/codegen/simd.py", line 1699, in generate_kernel_code_from_nodes
src_code = self.codegen_template(
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/codegen/simd.py", line 1374, in codegen_template
node.codegen(kernel.split_and_set_ranges(node.get_ranges()))
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/scheduler.py", line 1057, in codegen
self._body(*index_vars)
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/loop_body.py", line 404, in __call__
result = self.root_block()
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/loop_body.py", line 638, in __call__
return InterpreterShim(graph, submodules).run(V.get_ops_handler())
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/loop_body.py", line 60, in run
return super().run(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/fx/interpreter.py", line 167, in run
self.env[node] = self.run_node(node)
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/loop_body.py", line 56, in run_node
return super().run_node(n)
File "/usr/local/lib/python3.10/dist-packages/torch/fx/interpreter.py", line 230, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/fx/interpreter.py", line 334, in call_method
return getattr(self_obj, target)(*args_tail, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/codegen/common.py", line 1863, in inner
return pytree.tree_map(do_cse, value)
File "/usr/local/lib/python3.10/dist-packages/torch/utils/_pytree.py", line 991, in tree_map
return treespec.unflatten(map(func, *flat_args))
File "/usr/local/lib/python3.10/dist-packages/torch/utils/_pytree.py", line 830, in unflatten
leaves = list(leaves)
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/codegen/common.py", line 1828, in do_cse
output_dtype = getattr(
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/dtype_propagation.py", line 226, in mul
return promote_types([a, b])
File "/usr/local/lib/python3.10/dist-packages/torch/_inductor/dtype_propagation.py", line 78, in promote_types
assert isinstance(
torch._dynamo.exc.BackendCompilerFailed: backend='inductor' raised:
AssertionError:
Add Link
https://pytorch.org/tutorials/prototype/gpu_quantization_torchao_tutorial.html
Describe the bug
The tutorial is failing with the error shown in the traceback above; see the build log for the full output.
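From the traceback, the assert fires in Inductor's dtype propagation (promote_types in torch/_inductor/dtype_propagation.py) while the scheduler benchmarks a candidate fusion for a template kernel (speedup_by_fusion -> benchmark_fused_nodes -> codegen_template). For anyone trying to reproduce outside the CI job, a minimal sketch of the pattern the tutorial exercises follows; the toy MLP, shapes, and torchao API names are placeholders and assumptions, not the tutorial's actual SAM vit_b image-encoder setup.

# Minimal sketch, not the tutorial code: int8 dynamic quantization via torchao
# plus torch.compile(mode="max-autotune"), the combination the tutorial applies
# to SAM's image encoder. The model and sizes below are placeholders.
import torch
from torchao.quantization import quantize_, int8_dynamic_activation_int8_weight

model = torch.nn.Sequential(
    torch.nn.Linear(1024, 4096),
    torch.nn.GELU(),
    torch.nn.Linear(4096, 1024),
).to(device="cuda", dtype=torch.bfloat16).eval()

# Swap the Linear weights for int8 dynamic-activation/int8-weight subclasses in place
quantize_(model, int8_dynamic_activation_int8_weight())

# In the failing run, Inductor goes on to benchmark candidate fusions
# (speedup_by_fusion in the log above) while compiling this module
model_c = torch.compile(model, mode="max-autotune")

x = torch.randn(64, 1024, device="cuda", dtype=torch.bfloat16)
with torch.no_grad():
    model_c(x)  # compilation (and the failure, if it reproduces) happens on the first call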
Please submit fixes against the 2.6-RC-TEST branch and enable in .jenkins/validate_tutorials_built.py
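For reference, enabling the tutorial in .jenkins/validate_tutorials_built.py usually just means removing it from the list of tutorials temporarily skipped by the build check once a fix lands. The snippet below is an assumption about that file's layout (a NOT_RUN list of skipped tutorials), not its exact contents:

# .jenkins/validate_tutorials_built.py -- assumed layout, not verbatim
NOT_RUN = [
    # ... other temporarily disabled tutorials ...
    "prototype_source/gpu_quantization_torchao_tutorial",  # delete this entry to re-enable the check
]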
CC: @HDCharles @eellison @driss
Describe your environment