diff --git a/CLA.md b/CLA.md
new file mode 100644
index 00000000..389a4746
--- /dev/null
+++ b/CLA.md
@@ -0,0 +1,58 @@
+## Individual Contributor License Agreement (CLA)
+
+**Thank you for submitting your contributions to this project.**
+
+By signing this CLA, you agree that the following terms apply to all of your past, present and future contributions
+to the project.
+
+### License.
+
+You hereby represent that all present, past and future contributions are governed by the
+[MIT License](https://opensource.org/licenses/MIT)
+copyright statement.
+
+This entails that to the extent possible under law, you transfer all copyright and related or neighboring rights
+of the code or documents you contribute to the project itself or its maintainers.
+Furthermore, you also represent that you have the authority to perform the above waiver
+with respect to the entirety of your contributions.
+
+### Moral Rights.
+
+To the fullest extent permitted under applicable law, you hereby waive, and agree not to
+assert, all of your “moral rights” in or relating to your contributions for the benefit of the project.
+
+### Third Party Content.
+
+If your Contribution includes or is based on any source code, object code, bug fixes, configuration changes, tools,
+specifications, documentation, data, materials, feedback, information or other works of authorship that were not
+authored by you (“Third Party Content”) or if you are aware of any third party intellectual property or proprietary
+rights associated with your Contribution (“Third Party Rights”),
+then you agree to include with the submission of your Contribution full details respecting such Third Party
+Content and Third Party Rights, including, without limitation, identification of which aspects of your
+Contribution contain Third Party Content or are associated with Third Party Rights, the owner/author of the
+Third Party Content and Third Party Rights, where you obtained the Third Party Content, and any applicable
+third party license terms or restrictions respecting the Third Party Content and Third Party Rights. For greater
+certainty, the foregoing obligations respecting the identification of Third Party Content and Third Party Rights
+do not apply to any portion of a Project that is incorporated into your Contribution to that same Project.
+
+### Representations.
+
+You represent that, other than the Third Party Content and Third Party Rights identified by
+you in accordance with this Agreement, you are the sole author of your Contributions and are legally entitled
+to grant the foregoing licenses and waivers in respect of your Contributions. If your Contributions were
+created in the course of your employment with your past or present employer(s), you represent that such
+employer(s) has authorized you to make your Contributions on behalf of such employer(s) or such
+employer(s) has waived all of their right, title or interest in or to your Contributions.
+
+### Disclaimer.
+
+To the fullest extent permitted under applicable law, your Contributions are provided on an "as is"
+basis, without any warranties or conditions, express or implied, including, without limitation, any implied
+warranties or conditions of non-infringement, merchantability or fitness for a particular purpose. You are not
+required to provide support for your Contributions, except to the extent you desire to provide support.
+
+### No Obligation.
+
+You acknowledge that the maintainers of this project are under no obligation to use or incorporate your contributions
+into the project. The decision to use or incorporate your contributions into the project will be made at the
+sole discretion of the maintainers or their authorized delegates.
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index 2d928092..cd3a32f5 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -3,6 +3,7 @@
 Below is a list of developers who have contributed to torch2trt. This is also used to track
 contributors who have agreed to torch2trt's Contributor License Agreement.
 
+- [John Welsh](https://github.com/jaybdub) (CLA)
 - John Welsh
 
 ## Becoming a Contributor
@@ -42,6 +43,6 @@ In some instances, you may be requested to sign torch2trt's Contributor License
 4. Make a signed commit with the following text
 
     ```md
-    git commit -S -m "I have read and agree to the Contributor License Agreement as written in the file CLA.pdf of this project. Signed, "
+    git commit -S -m "I have read and agree to the Contributor License Agreement as written in the file CLA.md of this project. Signed, "
     ```
 
diff --git a/torch2trt/converters/view.py b/torch2trt/converters/view.py
index b60dabb4..72a3394a 100644
--- a/torch2trt/converters/view.py
+++ b/torch2trt/converters/view.py
@@ -12,10 +12,14 @@ @tensorrt_converter('torch.unsqueeze')
 def convert_view(ctx):
     input = ctx.method_args[0]
+    print("input_shape", input.shape)
     input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
+    print("input_trt", input_trt.shape)
     output = ctx.method_return
+    print("o/p", output.shape)
     layer = ctx.network.add_shuffle(input_trt)
     layer.reshape_dims = tuple(output.shape[1:])
+    print("trt layer o/p shape", layer.get_output(0).shape)
     output._trt = layer.get_output(0)
 
 
@@ -45,13 +49,13 @@ def forward(self, x):
         return x.unsqueeze(dim=self.dim)
 
 
-@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
-@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
-@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
+# @add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
+# @add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
+@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 3, 3)], max_batch_size=3)
 def test_view_1d():
     return View(1, -1)
 
-
+'''
 @add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
 @add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
 @add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
@@ -73,5 +77,5 @@ def test_unsqueeze():
 @add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 1, 3)])
 def test_squeeze():
     return Squeeze(2)
-
+'''
diff --git a/torch2trt/test.py b/torch2trt/test.py
index dec9bb88..bb01546f 100644
--- a/torch2trt/test.py
+++ b/torch2trt/test.py
@@ -45,6 +45,8 @@ def run(self):
             if outputs[i].dtype == torch.bool:
                 max_error_i = torch.sum(outputs[i] ^ outputs_trt[i])
             else:
+                print("under test.py")
+                print("pytorch op", outputs[i].size(), "trt op", outputs_trt[i].size())
                 max_error_i = torch.max(torch.abs(outputs[i] - outputs_trt[i]))
 
             if max_error_i > max_error:
@@ -144,4 +146,4 @@ def run(self):
     print('NUM_TESTS: %d' % num_tests)
     print('NUM_SUCCESSFUL_CONVERSION: %d' % num_success)
     print('NUM_FAILED_CONVERSION: %d' % num_error)
-    print('NUM_ABOVE_TOLERANCE: %d' % num_tolerance)
\ No newline at end of file
+    print('NUM_ABOVE_TOLERANCE: %d' % num_tolerance)
diff --git a/torch2trt/torch2trt.py b/torch2trt/torch2trt.py
index 6b153a02..d2fcf2ab 100644
--- a/torch2trt/torch2trt.py
+++ b/torch2trt/torch2trt.py
@@ -146,15 +146,18 @@ def add_missing_trt_tensors(network, tensors):
         # get tensor w/ _trt
         # or... add constant for scalar primitive
         if isinstance(t, float) or isinstance(t, int):
+            print("if")
             shape = (1,)
             scalar = t * torch.ones(shape, dtype=dtype).cpu().numpy()
             trt_tensor = network.add_constant(shape, scalar).get_output(0)
         elif hasattr(t, "_trt"):
+            print("elif")
+            print(t.shape, t._trt.shape)
             trt_tensor = t._trt
 
         # or... add constant for leaf tensor w/o _trt
         else:
-
+            print("else condition")
             # remove all preceding ones, these can be re-inserted later when broadcasting
             num_preceding_ones = 0
             for j in range(len(t.shape)):
@@ -163,14 +166,13 @@ def add_missing_trt_tensors(network, tensors):
                 else:
                     break
             shape = tuple(t.shape[num_preceding_ones:])
-
             weight = t.detach().cpu().numpy()
             t._trt = network.add_constant(shape, weight).get_output(0)
             trt_tensor = t._trt
 
         assert trt_tensor is not None
-
+        print(trt_tensor.shape)
         trt_tensors[i] = trt_tensor
 
     return trt_tensors
@@ -514,8 +516,9 @@ def torch2trt(module,
     inputs_in = inputs
 
     # copy inputs to avoid modifications to source data
-    inputs = [tensor.clone()[0:1] for tensor in inputs]  # only run single entry
-
+    inputs = [tensor.clone() for tensor in inputs]  # keep the full batch
+    for tensor in inputs:
+        print(tensor.shape)
     logger = trt.Logger(log_level)
     builder = trt.Builder(logger)
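
Taken together, these changes probe how torch2trt handles multi-entry batches: the view converter's shuffle layer reshapes only the non-batch dimensions (`reshape_dims = tuple(output.shape[1:])`), the view test now feeds a batch of 2 built with `max_batch_size=3`, and `torch2trt()` no longer slices inputs down to a single entry. The sketch below shows the kind of check the added debug prints support; it is a minimal illustration, not code from the diff — the `Flatten` module and the shapes are assumptions, and it presumes torch2trt plus a CUDA device are available.

```python
# Hedged sketch: check that a view-style reshape survives TensorRT
# conversion for a batch larger than 1. The module and shapes are
# illustrative; only torch2trt's public API is used.
import torch
from torch2trt import torch2trt


class Flatten(torch.nn.Module):
    def forward(self, x):
        # Keep the batch dimension and flatten the rest, so only
        # x.shape[1:] changes -- mirroring the converter's
        # reshape_dims = tuple(output.shape[1:]).
        return x.view(x.size(0), -1)


model = Flatten().cuda().eval()
x = torch.randn(2, 3, 3, 3).cuda()  # batch of 2, as in the modified test

# max_batch_size must cover the largest batch the engine will see
model_trt = torch2trt(model, [x], max_batch_size=3)

# compare against PyTorch, mirroring the check in torch2trt/test.py
print('pytorch op', model(x).size(), 'trt op', model_trt(x).size())
print('max error:', torch.max(torch.abs(model(x) - model_trt(x))).item())
```

If the batch dimension were folded into `reshape_dims`, the TensorRT output shape would disagree with PyTorch's for any batch size above 1, which is exactly what the `print("pytorch op", ...)` line in test.py is positioned to reveal.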