diff --git a/examples/BuddyLeNet/CMakeLists.txt b/examples/BuddyLeNet/CMakeLists.txt
index 5935ad50c5..1902384f92 100644
--- a/examples/BuddyLeNet/CMakeLists.txt
+++ b/examples/BuddyLeNet/CMakeLists.txt
@@ -52,29 +52,6 @@ add_custom_command(
 set(ONE_SHOT_BUFFERIZE_OPTION "bufferize-function-boundaries=1 function-boundary-type-conversion=identity-layout-map")
 set(LOWER_TO_NVVM_OPTION "cubin-chip=sm_80 cubin-features=+ptx71 cubin-format=fatbin")
 
-# add_custom_command(
-#   OUTPUT subgraph0.o
-#   COMMAND ${LLVM_TOOLS_BINARY_DIR}/mlir-opt ${BUDDY_EXAMPLES_DIR}/BuddyLeNet/subgraph0.mlir
-#             -pass-pipeline "builtin.module(func.func(tosa-to-linalg-named, tosa-to-linalg, tosa-to-tensor, tosa-to-arith))" |
-#           ${LLVM_TOOLS_BINARY_DIR}/mlir-opt
-#             -one-shot-bufferize=${ONE_SHOT_BUFFERIZE_OPTION}
-#             -buffer-deallocation
-#             -convert-linalg-to-parallel-loops
-#             -canonicalize
-#             -gpu-map-parallel-loops
-#             -convert-parallel-loops-to-gpu
-#             -gpu-kernel-outlining
-#             -canonicalize
-#             -cse |
-#           ${BUDDY_BINARY_DIR}/buddy-opt -convert-memcpy-to-gpu -gpu-async-region -canonicalize |
-#           ${LLVM_TOOLS_BINARY_DIR}/mlir-opt -llvm-request-c-wrappers --test-lower-to-nvvm=${LOWER_TO_NVVM_OPTION} |
-#           ${LLVM_TOOLS_BINARY_DIR}/mlir-translate -mlir-to-llvmir |
-#           ${LLVM_TOOLS_BINARY_DIR}/llvm-as |
-#           ${LLVM_TOOLS_BINARY_DIR}/llc -filetype=obj -relocation-model=pic -O0 -o ${BUDDY_BINARY_DIR}/../examples/BuddyLeNet/subgraph0.o
-#   DEPENDS ${BUDDY_EXAMPLES_DIR}/BuddyLeNet/subgraph0.mlir
-#   COMMENT "Building subgraph0.o"
-#   VERBATIM)
-
 add_custom_command(
   OUTPUT subgraph1.o
   COMMAND ${LLVM_TOOLS_BINARY_DIR}/mlir-opt ${BUDDY_EXAMPLES_DIR}/BuddyLeNet/subgraph1.mlir
diff --git a/frontend/Python/graph/json_decoder.py b/frontend/Python/graph/json_decoder.py
index cfa825b0aa..f3a11440ac 100644
--- a/frontend/Python/graph/json_decoder.py
+++ b/frontend/Python/graph/json_decoder.py
@@ -1,3 +1,22 @@
+# ===- json_decoder.py ---------------------------------------------------------
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ===---------------------------------------------------------------------------
+#
+# This converts the JSON string representing Buddy Graph into a Graph object.
+#
+# ===---------------------------------------------------------------------------
 import json
 from pathlib import Path
 
@@ -11,6 +30,7 @@ from ..ops.math import ops_registry as math_ops_registry
 from ..ops.func import ops_registry as func_ops_registry
 
 
+
 def json_to_graph(json_str):
     """
     Converts a buddy graph JSON string to a Graph object.
@@ -21,6 +41,7 @@
     Returns:
         Graph: The Graph object created from the JSON data.
     """
+
     def json_to_tensormeta(json_data):
         """
         Convert JSON data to a TensorMeta object.
@@ -31,68 +52,77 @@ def json_to_tensormeta(json_data):
         Returns:
             TensorMeta: The TensorMeta object created from the JSON data.
""" - if 'shape' in json_data: - shape = json_data['shape'] + if "shape" in json_data: + shape = json_data["shape"] dtype = next( - (member for member in TensorDType.__members__.values() - if member.value.upper() == json_data['dtype'].upper()), None + ( + member + for member in TensorDType.__members__.values() + if member.value.upper() == json_data["dtype"].upper() + ), + None, ) return TensorMeta(shape, dtype) return {} - + json_data = json.loads(json_str) _graph = json_data - graph_name = _graph['graph_name'] + graph_name = _graph["graph_name"] inputs = [] params = [] - for _input in _graph['inputs']: + for _input in _graph["inputs"]: inputs.append(json_to_tensormeta(_input)) - for _param in _graph['params']: + for _param in _graph["params"]: params.append(json_to_tensormeta(_param)) ops_registry = {} ops_registry.update(func_ops_registry) ops_registry.update(linalg_ops_registry) ops_registry.update(tosa_ops_registry) ops_registry.update(math_ops_registry) - graph = Graph( - inputs, - params, - ops_registry, - graph_name - ) - graph.device = _graph['device'] - for _node in _graph['nodes']: - op_class = _node['class'] + graph = Graph(inputs, params, ops_registry, graph_name) + graph.device = _graph["device"] + for _node in _graph["nodes"]: + op_class = _node["class"] op = globals()[op_class]() - op._name = _node['name'] - op._children = _node['children'] - op._parents = _node['parents'] - op._arguments = _node['arguments'] - op._keyword_arguments = _node['keyword_arguments'] + op._name = _node["name"] + op._children = _node["children"] + op._parents = _node["parents"] + op._arguments = _node["arguments"] + op._keyword_arguments = _node["keyword_arguments"] op._type = next( - (member for member in OpType.__members__.values() if member.value == _node['type']), None + ( + member + for member in OpType.__members__.values() + if member.value == _node["type"] + ), + None, ) # TODO : node attr tensor_meta should be Class TensorMeta - if ('shape' not in _node['tensor_meta']): - op._tensor_meta = _node['tensor_meta'] + if "shape" not in _node["tensor_meta"]: + op._tensor_meta = _node["tensor_meta"] else: op._tensor_meta = { - 'shape' : _node['tensor_meta']['shape'], - 'dtype' : next( - (member for member in TensorDType.__members__.values() - if member.value.upper() == _node['tensor_meta']['dtype'].upper()), None - ) + "shape": _node["tensor_meta"]["shape"], + "dtype": next( + ( + member + for member in TensorDType.__members__.values() + if member.value.upper() + == _node["tensor_meta"]["dtype"].upper() + ), + None, + ), } graph.add_node(op) - for i, device in enumerate(list(set(_graph['node_map_device'].values()))): + for i, device in enumerate(list(set(_graph["node_map_device"].values()))): subgraph_name = "subgraph{}".format(i) graph.op_groups[subgraph_name] = [] graph.group_map_device[subgraph_name] = DeviceType(device) - for node, op_device in _graph['node_map_device'].items(): + for node, op_device in _graph["node_map_device"].items(): op = graph.node_table[node] for subgraph_name, group_device in graph.group_map_device.items(): if op_device == group_device.value: diff --git a/midend/lib/Conversion/MLIRGPU/ConvertMemcpyToGPU.cpp b/midend/lib/Conversion/MLIRGPU/ConvertMemcpyToGPU.cpp index f616127930..e44f21cb6e 100644 --- a/midend/lib/Conversion/MLIRGPU/ConvertMemcpyToGPU.cpp +++ b/midend/lib/Conversion/MLIRGPU/ConvertMemcpyToGPU.cpp @@ -211,4 +211,4 @@ void registerConvertMemcpyToGPUPass() { PassRegistration(); } } // namespace buddy -} // namespace mlir \ No newline at end of file +} // 
namespace mlir diff --git a/tests/Conversion/convert-memcpy-to-gpu.mlir b/tests/Conversion/convert-memcpy-to-gpu.mlir index 573000a4b5..65e9301e4a 100644 --- a/tests/Conversion/convert-memcpy-to-gpu.mlir +++ b/tests/Conversion/convert-memcpy-to-gpu.mlir @@ -66,4 +66,4 @@ module attributes {gpu.container_module} { gpu.return } } -} \ No newline at end of file +}
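
Usage note (not part of the patch): a minimal sketch of feeding a previously exported Buddy Graph JSON dump through the decoder touched above. The package path buddy.compiler.graph.json_decoder and the file name lenet_graph.json are illustrative assumptions, not taken from this change.

    # Decode a Buddy Graph JSON dump back into a Graph object.
    # Package path and file name below are assumptions for illustration.
    from pathlib import Path

    from buddy.compiler.graph.json_decoder import json_to_graph

    json_str = Path("lenet_graph.json").read_text()  # JSON exported elsewhere
    graph = json_to_graph(json_str)  # rebuild nodes, op groups, and device mapping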