From 5984c4431432094861603435aba5231dc8c8d928 Mon Sep 17 00:00:00 2001
From: Andrew Adams
Date: Tue, 6 Sep 2016 14:02:44 -0700
Subject: [PATCH] Run clang-format on python bindings

---
 python_bindings/numpy/dtype.cpp | 258 +++++-----
 python_bindings/numpy/dtype.hpp | 83 ++--
 python_bindings/numpy/internal.hpp | 8 +-
 python_bindings/numpy/ndarray.cpp | 401 +++++++--------
 python_bindings/numpy/ndarray.hpp | 333 ++++++-------
 python_bindings/numpy/numpy.cpp | 22 +-
 python_bindings/numpy/numpy.hpp | 8 +-
 .../numpy/numpy_object_mgr_traits.hpp | 37 +-
 python_bindings/numpy/readme.text | 7 +-
 python_bindings/python/Argument.cpp | 98 ++--
 python_bindings/python/Argument.h | 2 +-
 python_bindings/python/BoundaryConditions.cpp | 166 +++----
 python_bindings/python/BoundaryConditions.h | 4 +-
 python_bindings/python/Error.cpp | 18 +-
 python_bindings/python/Error.h | 2 +-
 python_bindings/python/Expr.cpp | 89 ++--
 python_bindings/python/Expr.h | 6 +-
 python_bindings/python/Func.cpp | 238 +++++----
 python_bindings/python/Func.h | 114 ++---
 python_bindings/python/Func_Ref.cpp | 68 ++-
 python_bindings/python/Func_Ref.h | 2 +-
 python_bindings/python/Func_Stage.cpp | 89 ++--
 python_bindings/python/Func_Stage.h | 2 +-
 python_bindings/python/Func_VarOrRVar.cpp | 22 +-
 python_bindings/python/Func_VarOrRVar.h | 3 +-
 python_bindings/python/Func_gpu.cpp | 4 +-
 python_bindings/python/Func_gpu.h | 215 ++++----
 python_bindings/python/Function.cpp | 8 +-
 python_bindings/python/Function.h | 2 +-
 python_bindings/python/Halide.cpp | 5 +-
 python_bindings/python/IROperator.cpp | 126 ++---
 python_bindings/python/IROperator.h | 2 +-
 python_bindings/python/Image.cpp | 178 +++----
 python_bindings/python/Image.h | 4 +-
 python_bindings/python/InlineReductions.cpp | 26 +-
 python_bindings/python/InlineReductions.h | 2 +-
 python_bindings/python/Lambda.cpp | 24 +-
 python_bindings/python/Lambda.h | 2 +-
 python_bindings/python/Param.cpp | 469 ++++++++----------
 python_bindings/python/Param.h | 2 +-
 python_bindings/python/RDom.cpp | 291 +++++------
 python_bindings/python/RDom.h | 2 +-
 python_bindings/python/Target.cpp | 121 +++--
 python_bindings/python/Target.h | 2 +-
 python_bindings/python/Type.cpp | 107 ++--
 python_bindings/python/Type.h | 7 +-
 python_bindings/python/Var.cpp | 159 +++---
 python_bindings/python/Var.h | 2 +-
 python_bindings/python/add_operators.cpp | 1 -
 python_bindings/python/add_operators.h | 105 ++--
 50 files changed, 1803 insertions(+), 2143 deletions(-)

diff --git a/python_bindings/numpy/dtype.cpp b/python_bindings/numpy/dtype.cpp
index 4f086d76df08..a2e66c4ed1d8 100644
--- a/python_bindings/numpy/dtype.cpp
+++ b/python_bindings/numpy/dtype.cpp
@@ -9,49 +9,64 @@
 #include "internal.hpp"
 
 #define DTYPE_FROM_CODE(code) \
-    dtype(python::detail::new_reference(reinterpret_cast<PyObject*>(PyArray_DescrFromType(code))))
-
-#define BUILTIN_INT_DTYPE(bits) \
-    template <> struct builtin_int_dtype< bits, false > { \
-        static dtype get() { return DTYPE_FROM_CODE(NPY_INT ## bits); } \
-    }; \
-    template <> struct builtin_int_dtype< bits, true > { \
-        static dtype get() { return DTYPE_FROM_CODE(NPY_UINT ## bits); } \
-    }; \
-    template dtype get_int_dtype< bits, false >(); \
-    template dtype get_int_dtype< bits, true >()
+    dtype(python::detail::new_reference(reinterpret_cast<PyObject *>(PyArray_DescrFromType(code))))
+
+#define BUILTIN_INT_DTYPE(bits) \
+    template <> \
+    struct builtin_int_dtype<bits, false> { \
+        static dtype get() { return DTYPE_FROM_CODE(NPY_INT##bits); } \
+    }; \
+    template <> \
+    struct builtin_int_dtype<bits, true> { \
+        static dtype get() { return
DTYPE_FROM_CODE(NPY_UINT##bits); } \ + }; \ + template dtype get_int_dtype(); \ + template dtype get_int_dtype() #define BUILTIN_FLOAT_DTYPE(bits) \ - template <> struct builtin_float_dtype< bits > { \ - static dtype get() { return DTYPE_FROM_CODE(NPY_FLOAT ## bits); } \ - }; \ - template dtype get_float_dtype< bits >() - -#define BUILTIN_COMPLEX_DTYPE(bits) \ - template <> struct builtin_complex_dtype< bits > { \ - static dtype get() { return DTYPE_FROM_CODE(NPY_COMPLEX ## bits); } \ + template <> \ + struct builtin_float_dtype { \ + static dtype get() { return DTYPE_FROM_CODE(NPY_FLOAT##bits); } \ }; \ - template dtype get_complex_dtype< bits >() - -namespace boost { namespace python { namespace converter { + template dtype get_float_dtype() + +#define BUILTIN_COMPLEX_DTYPE(bits) \ + template <> \ + struct builtin_complex_dtype { \ + static dtype get() { return DTYPE_FROM_CODE(NPY_COMPLEX##bits); } \ + }; \ + template dtype get_complex_dtype() + +namespace boost { +namespace python { +namespace converter { NUMPY_OBJECT_MANAGER_TRAITS_IMPL(PyArrayDescr_Type, Halide::numpy::dtype) -}}} // namespace boost::python::converter +} +} +} // namespace boost::python::converter -namespace Halide { namespace numpy { +namespace Halide { +namespace numpy { namespace detail { -dtype builtin_dtype::get() { return DTYPE_FROM_CODE(NPY_BOOL); } +dtype builtin_dtype::get() { return DTYPE_FROM_CODE(NPY_BOOL); } -template struct builtin_int_dtype; -template struct builtin_float_dtype; -template struct builtin_complex_dtype; +template +struct builtin_int_dtype; +template +struct builtin_float_dtype; +template +struct builtin_complex_dtype; -template dtype get_int_dtype() { - return builtin_int_dtype::get(); +template +dtype get_int_dtype() { + return builtin_int_dtype::get(); } -template dtype get_float_dtype() { return builtin_float_dtype::get(); } -template dtype get_complex_dtype() { return builtin_complex_dtype::get(); } +template +dtype get_float_dtype() { return builtin_float_dtype::get(); } +template +dtype get_complex_dtype() { return builtin_complex_dtype::get(); } BUILTIN_INT_DTYPE(8); BUILTIN_INT_DTYPE(16); @@ -63,58 +78,58 @@ BUILTIN_FLOAT_DTYPE(64); BUILTIN_COMPLEX_DTYPE(64); BUILTIN_COMPLEX_DTYPE(128); #if NPY_BITSOF_LONGDOUBLE > NPY_BITSOF_DOUBLE -template <> struct builtin_float_dtype< NPY_BITSOF_LONGDOUBLE > { +template <> +struct builtin_float_dtype { static dtype get() { return DTYPE_FROM_CODE(NPY_LONGDOUBLE); } }; -template dtype get_float_dtype< NPY_BITSOF_LONGDOUBLE >(); -template <> struct builtin_complex_dtype< 2 * NPY_BITSOF_LONGDOUBLE > { +template dtype get_float_dtype(); +template <> +struct builtin_complex_dtype<2 * NPY_BITSOF_LONGDOUBLE> { static dtype get() { return DTYPE_FROM_CODE(NPY_CLONGDOUBLE); } }; -template dtype get_complex_dtype< 2 * NPY_BITSOF_LONGDOUBLE >(); +template dtype get_complex_dtype<2 * NPY_BITSOF_LONGDOUBLE>(); #endif -} // namespace detail - -python::detail::new_reference dtype::convert(python::object const & arg, bool align) { - PyArray_Descr* obj=NULL; - if (align) { - if (PyArray_DescrAlignConverter(arg.ptr(), &obj) < 0) - python::throw_error_already_set(); - } else { - if (PyArray_DescrConverter(arg.ptr(), &obj) < 0) - python::throw_error_already_set(); - } - return python::detail::new_reference(reinterpret_cast(obj)); +} // namespace detail + +python::detail::new_reference dtype::convert(python::object const &arg, bool align) { + PyArray_Descr *obj = NULL; + if (align) { + if (PyArray_DescrAlignConverter(arg.ptr(), &obj) < 0) + 
python::throw_error_already_set(); + } else { + if (PyArray_DescrConverter(arg.ptr(), &obj) < 0) + python::throw_error_already_set(); + } + return python::detail::new_reference(reinterpret_cast(obj)); } -int dtype::get_itemsize() const { return reinterpret_cast(ptr())->elsize;} +int dtype::get_itemsize() const { return reinterpret_cast(ptr())->elsize; } -bool equivalent(dtype const & a, dtype const & b) { - // On Windows x64, the behaviour described on - // http://docs.scipy.org/doc/numpy/reference/c-api.array.html for - // PyArray_EquivTypes unfortunately does not extend as expected: - // "For example, on 32-bit platforms, NPY_LONG and NPY_INT are equivalent". - // This should also hold for 64-bit platforms (and does on Linux), but not - // on Windows. Implement an alternative: +bool equivalent(dtype const &a, dtype const &b) { +// On Windows x64, the behaviour described on +// http://docs.scipy.org/doc/numpy/reference/c-api.array.html for +// PyArray_EquivTypes unfortunately does not extend as expected: +// "For example, on 32-bit platforms, NPY_LONG and NPY_INT are equivalent". +// This should also hold for 64-bit platforms (and does on Linux), but not +// on Windows. Implement an alternative: #ifdef _MSC_VER if (sizeof(long) == sizeof(int) && // Manually take care of the type equivalence. ((a == dtype::get_builtin() || a == dtype::get_builtin()) && - (b == dtype::get_builtin() || b == dtype::get_builtin()) || + (b == dtype::get_builtin() || b == dtype::get_builtin()) || (a == dtype::get_builtin() || a == dtype::get_builtin()) && - (b == dtype::get_builtin() || b == dtype::get_builtin()))) { + (b == dtype::get_builtin() || b == dtype::get_builtin()))) { return true; } else { return PyArray_EquivTypes( - reinterpret_cast(a.ptr()), - reinterpret_cast(b.ptr()) - ); + reinterpret_cast(a.ptr()), + reinterpret_cast(b.ptr())); } #else return PyArray_EquivTypes( - reinterpret_cast(a.ptr()), - reinterpret_cast(b.ptr()) - ); + reinterpret_cast(a.ptr()), + reinterpret_cast(b.ptr())); #endif } @@ -125,76 +140,75 @@ namespace pyconv = boost::python::converter; template class array_scalar_converter { public: + static PyTypeObject const *get_pytype() { + // This implementation depends on the fact that get_builtin returns pointers to objects + // NumPy has declared statically, and that the typeobj member also refers to a static + // object. That means we don't need to do any reference counting. + // In fact, I'm somewhat concerned that increasing the reference count of any of these + // might cause leaks, because I don't think Boost.Python ever decrements it, but it's + // probably a moot point if everything is actually static. + return reinterpret_cast(dtype::get_builtin().ptr())->typeobj; + } - static PyTypeObject const * get_pytype() { - // This implementation depends on the fact that get_builtin returns pointers to objects - // NumPy has declared statically, and that the typeobj member also refers to a static - // object. That means we don't need to do any reference counting. - // In fact, I'm somewhat concerned that increasing the reference count of any of these - // might cause leaks, because I don't think Boost.Python ever decrements it, but it's - // probably a moot point if everything is actually static. 
- return reinterpret_cast(dtype::get_builtin().ptr())->typeobj; - } - - static void * convertible(PyObject * obj) { - if (obj->ob_type == get_pytype()) { - return obj; - } else { - dtype dt(python::detail::borrowed_reference(obj->ob_type)); - if (equivalent(dt, dtype::get_builtin())) { + static void *convertible(PyObject *obj) { + if (obj->ob_type == get_pytype()) { return obj; + } else { + dtype dt(python::detail::borrowed_reference(obj->ob_type)); + if (equivalent(dt, dtype::get_builtin())) { + return obj; + } } - } - return 0; - } - - static void convert(PyObject * obj, pyconv::rvalue_from_python_stage1_data* data) { - void * storage = reinterpret_cast*>(data)->storage.bytes; - // We assume std::complex is a "standard layout" here and elsewhere; not guaranteed by - // C++03 standard, but true in every known implementation (and guaranteed by C++11). - PyArray_ScalarAsCtype(obj, reinterpret_cast(storage)); - data->convertible = storage; - } - - static void declare() { - pyconv::registry::push_back( - &convertible, &convert, python::type_id() + return 0; + } + + static void convert(PyObject *obj, pyconv::rvalue_from_python_stage1_data *data) { + void *storage = reinterpret_cast *>(data)->storage.bytes; + // We assume std::complex is a "standard layout" here and elsewhere; not guaranteed by + // C++03 standard, but true in every known implementation (and guaranteed by C++11). + PyArray_ScalarAsCtype(obj, reinterpret_cast(storage)); + data->convertible = storage; + } + + static void declare() { + pyconv::registry::push_back( + &convertible, &convert, python::type_id() #ifndef BOOST_PYTHON_NO_PY_SIGNATURES - , &get_pytype + , + &get_pytype #endif - ); - } - + ); + } }; -} // anonymous +} // anonymous void dtype::register_scalar_converters() { - array_scalar_converter::declare(); - array_scalar_converter::declare(); - array_scalar_converter::declare(); - array_scalar_converter::declare(); - array_scalar_converter::declare(); - array_scalar_converter::declare(); - array_scalar_converter::declare(); + array_scalar_converter::declare(); + array_scalar_converter::declare(); + array_scalar_converter::declare(); + array_scalar_converter::declare(); + array_scalar_converter::declare(); + array_scalar_converter::declare(); + array_scalar_converter::declare(); #ifdef _MSC_VER - // Since the npy_(u)int32 types are defined as long types and treated - // as being different from the int32 types, these converters must be declared - // explicitely. - array_scalar_converter::declare(); - array_scalar_converter::declare(); + // Since the npy_(u)int32 types are defined as long types and treated + // as being different from the int32 types, these converters must be declared + // explicitely. 
+ array_scalar_converter::declare(); + array_scalar_converter::declare(); #endif - array_scalar_converter::declare(); - array_scalar_converter::declare(); - array_scalar_converter::declare(); - array_scalar_converter::declare(); - array_scalar_converter< std::complex >::declare(); - array_scalar_converter< std::complex >::declare(); + array_scalar_converter::declare(); + array_scalar_converter::declare(); + array_scalar_converter::declare(); + array_scalar_converter::declare(); + array_scalar_converter>::declare(); + array_scalar_converter>::declare(); #if NPY_BITSOF_LONGDOUBLE > NPY_BITSOF_DOUBLE - array_scalar_converter::declare(); - array_scalar_converter< std::complex >::declare(); + array_scalar_converter::declare(); + array_scalar_converter>::declare(); #endif } -} // namespace Halide::numpy -} // namespace Halide +} // namespace Halide::numpy +} // namespace Halide diff --git a/python_bindings/numpy/dtype.hpp b/python_bindings/numpy/dtype.hpp index 54760d56d6b7..9d96d7b66d12 100644 --- a/python_bindings/numpy/dtype.hpp +++ b/python_bindings/numpy/dtype.hpp @@ -10,13 +10,14 @@ * @brief Object manager for Python's numpy.dtype class. */ -#include #include "numpy_object_mgr_traits.hpp" +#include #include #include -namespace Halide { namespace numpy { +namespace Halide { +namespace numpy { using namespace boost; /** @@ -25,14 +26,14 @@ using namespace boost; * @todo This could have a lot more interesting accessors. */ class dtype : public python::object { - static python::detail::new_reference convert(python::object::object_cref arg, bool align); -public: + static python::detail::new_reference convert(python::object::object_cref arg, bool align); - /// @brief Convert an arbitrary Python object to a data-type descriptor object. - template - explicit dtype(T arg, bool align=false) : python::object(convert(arg, align)) {} +public: + /// @brief Convert an arbitrary Python object to a data-type descriptor object. + template + explicit dtype(T arg, bool align = false) : python::object(convert(arg, align)) {} - /** + /** * @brief Get the built-in numpy dtype associated with the given scalar template type. * * This is perhaps the most useful part of the numpy API: it returns the dtype object @@ -43,74 +44,80 @@ class dtype : public python::object { * It can also be useful for users to add explicit specializations for POD structs * that return field-based dtypes. */ - template static dtype get_builtin(); + template + static dtype get_builtin(); - /// @brief Return the size of the data type in bytes. - int get_itemsize() const; + /// @brief Return the size of the data type in bytes. + int get_itemsize() const; - /** + /** * @brief Compare two dtypes for equivalence. * * This is more permissive than equality tests. For instance, if long and int are the same * size, the dtypes corresponding to each will be equivalent, but not equal. */ - friend bool equivalent(dtype const & a, dtype const & b); + friend bool equivalent(dtype const &a, dtype const &b); - /** + /** * @brief Register from-Python converters for NumPy's built-in array scalar types. * * This is usually called automatically by initialize(), and shouldn't be called twice * (doing so just adds unused converters to the Boost.Python registry). 
*/ - static void register_scalar_converters(); - - BOOST_PYTHON_FORWARD_OBJECT_CONSTRUCTORS(dtype, python::object); + static void register_scalar_converters(); + BOOST_PYTHON_FORWARD_OBJECT_CONSTRUCTORS(dtype, python::object); }; -bool equivalent(dtype const & a, dtype const & b); +bool equivalent(dtype const &a, dtype const &b); -namespace detail -{ +namespace detail { -template dtype get_int_dtype(); +template +dtype get_int_dtype(); -template dtype get_float_dtype(); +template +dtype get_float_dtype(); -template dtype get_complex_dtype(); +template +dtype get_complex_dtype(); -template ::value> +template ::value> struct builtin_dtype; template -struct builtin_dtype { - static dtype get() { return get_int_dtype< 8*sizeof(T), boost::is_unsigned::value >(); } +struct builtin_dtype { + static dtype get() { return get_int_dtype<8 * sizeof(T), boost::is_unsigned::value>(); } }; template <> -struct builtin_dtype { - static dtype get(); +struct builtin_dtype { + static dtype get(); }; template -struct builtin_dtype { - static dtype get() { return get_float_dtype< 8*sizeof(T) >(); } +struct builtin_dtype { + static dtype get() { return get_float_dtype<8 * sizeof(T)>(); } }; template -struct builtin_dtype< std::complex, false > { - static dtype get() { return get_complex_dtype< 16*sizeof(T) >(); } +struct builtin_dtype, false> { + static dtype get() { return get_complex_dtype<16 * sizeof(T)>(); } }; -} // namespace detail +} // namespace detail template inline dtype dtype::get_builtin() { return detail::builtin_dtype::get(); } +} +} // namespace Halide::numpy -}} // namespace Halide::numpy - -namespace boost { namespace python { namespace converter { +namespace boost { +namespace python { +namespace converter { NUMPY_OBJECT_MANAGER_TRAITS(Halide::numpy::dtype); -}}} // namespace Halide::python::converter +} +} +} // namespace Halide::python::converter -#endif // !HALIDE_NUMPY_DTYPE_HPP_INCLUDED +#endif // !HALIDE_NUMPY_DTYPE_HPP_INCLUDED diff --git a/python_bindings/numpy/internal.hpp b/python_bindings/numpy/internal.hpp index 304cacc91b8e..1dd25c60187e 100644 --- a/python_bindings/numpy/internal.hpp +++ b/python_bindings/numpy/internal.hpp @@ -23,11 +23,11 @@ ERROR_internal_hpp_is_for_internal_use_only #endif #define PY_ARRAY_UNIQUE_SYMBOL HALIDE_NUMPY_ARRAY_API #define PY_UFUNC_UNIQUE_SYMBOL HALIDE_UFUNC_ARRAY_API +#include "numpy.hpp" #include #include -#include "numpy.hpp" -#define NUMPY_OBJECT_MANAGER_TRAITS_IMPL(pytype,manager) \ - PyTypeObject const * object_manager_traits::get_pytype() { return &pytype; } +#define NUMPY_OBJECT_MANAGER_TRAITS_IMPL(pytype, manager) \ + PyTypeObject const *object_manager_traits::get_pytype() { return &pytype; } -#endif // !HALIDE_NUMPY_INTERNAL_HPP_INCLUDED +#endif // !HALIDE_NUMPY_INTERNAL_HPP_INCLUDED diff --git a/python_bindings/numpy/ndarray.cpp b/python_bindings/numpy/ndarray.cpp index f9b6ab118909..c84043b8aefa 100644 --- a/python_bindings/numpy/ndarray.cpp +++ b/python_bindings/numpy/ndarray.cpp @@ -7,278 +7,223 @@ #include "internal.hpp" #include -namespace boost -{ -namespace python -{ -namespace converter -{ +namespace boost { +namespace python { +namespace converter { NUMPY_OBJECT_MANAGER_TRAITS_IMPL(PyArray_Type, Halide::numpy::ndarray) -} // namespace boost::python::converter -} // namespace boost::python -} // namespace boost - -namespace Halide -{ -namespace numpy -{ -namespace detail -{ - -ndarray::bitflag numpy_to_bitflag(int const f) -{ - ndarray::bitflag r = ndarray::NONE; - if (f & NPY_C_CONTIGUOUS) r = (r | ndarray::C_CONTIGUOUS); - if 
(f & NPY_F_CONTIGUOUS) r = (r | ndarray::F_CONTIGUOUS); - if (f & NPY_ALIGNED) r = (r | ndarray::ALIGNED); - if (f & NPY_WRITEABLE) r = (r | ndarray::WRITEABLE); - return r; -} - -int const bitflag_to_numpy(ndarray::bitflag f) -{ - int r = 0; - if (f & ndarray::C_CONTIGUOUS) r |= NPY_C_CONTIGUOUS; - if (f & ndarray::F_CONTIGUOUS) r |= NPY_F_CONTIGUOUS; - if (f & ndarray::ALIGNED) r |= NPY_ALIGNED; - if (f & ndarray::WRITEABLE) r |= NPY_WRITEABLE; - return r; -} - -bool is_c_contiguous(std::vector const & shape, - std::vector const & strides, - int itemsize) -{ - std::vector::const_reverse_iterator j = strides.rbegin(); - int total = itemsize; - for (std::vector::const_reverse_iterator i = shape.rbegin(); i != shape.rend(); ++i, ++j) - { - if (total != *j) return false; - total *= (*i); - } - return true; -} - -bool is_f_contiguous(std::vector const & shape, - std::vector const & strides, - int itemsize) -{ - std::vector::const_iterator j = strides.begin(); - int total = itemsize; - for (std::vector::const_iterator i = shape.begin(); i != shape.end(); ++i, ++j) - { - if (total != *j) return false; - total *= (*i); - } - return true; -} - -bool is_aligned(std::vector const & strides, - int itemsize) -{ - for (std::vector::const_iterator i = strides.begin(); i != strides.end(); ++i) - { - if (*i % itemsize) return false; - } - return true; -} - -inline PyArray_Descr * incref_dtype(dtype const & dt) -{ - Py_INCREF(dt.ptr()); - return reinterpret_cast(dt.ptr()); +} // namespace boost::python::converter +} // namespace boost::python +} // namespace boost + +namespace Halide { +namespace numpy { +namespace detail { + +ndarray::bitflag numpy_to_bitflag(int const f) { + ndarray::bitflag r = ndarray::NONE; + if (f & NPY_C_CONTIGUOUS) r = (r | ndarray::C_CONTIGUOUS); + if (f & NPY_F_CONTIGUOUS) r = (r | ndarray::F_CONTIGUOUS); + if (f & NPY_ALIGNED) r = (r | ndarray::ALIGNED); + if (f & NPY_WRITEABLE) r = (r | ndarray::WRITEABLE); + return r; } -ndarray from_data_impl(void * data, - dtype const & dt, - python::object const & shape, - python::object const & strides, - python::object const & owner, - bool writeable) -{ - std::vector shape_(len(shape)); - std::vector strides_(len(strides)); - if (shape_.size() != strides_.size()) - { - PyErr_SetString(PyExc_ValueError, "Length of shape and strides arrays do not match."); - python::throw_error_already_set(); - } - for (std::size_t i = 0; i < shape_.size(); ++i) - { - shape_[i] = python::extract(shape[i]); - strides_[i] = python::extract(strides[i]); - } - return from_data_impl(data, dt, shape_, strides_, owner, writeable); +int const bitflag_to_numpy(ndarray::bitflag f) { + int r = 0; + if (f & ndarray::C_CONTIGUOUS) r |= NPY_C_CONTIGUOUS; + if (f & ndarray::F_CONTIGUOUS) r |= NPY_F_CONTIGUOUS; + if (f & ndarray::ALIGNED) r |= NPY_ALIGNED; + if (f & ndarray::WRITEABLE) r |= NPY_WRITEABLE; + return r; } -ndarray from_data_impl(void * data, - dtype const & dt, - std::vector const & shape, - std::vector const & strides, - python::object const & owner, - bool writeable) -{ - if (shape.size() != strides.size()) - { - PyErr_SetString(PyExc_ValueError, "Length of shape and strides arrays do not match."); - python::throw_error_already_set(); - } - int itemsize = dt.get_itemsize(); - int flags = 0; - if (writeable) flags |= NPY_WRITEABLE; - if (is_c_contiguous(shape, strides, itemsize)) flags |= NPY_C_CONTIGUOUS; - if (is_f_contiguous(shape, strides, itemsize)) flags |= NPY_F_CONTIGUOUS; - if (is_aligned(strides, itemsize)) flags |= NPY_ALIGNED; - ndarray 
r(python::detail::new_reference - (PyArray_NewFromDescr(&PyArray_Type, - incref_dtype(dt), - shape.size(), - const_cast(&shape.front()), - const_cast(&strides.front()), - data, - flags, - NULL))); +bool is_c_contiguous(std::vector const &shape, + std::vector const &strides, + int itemsize) { + std::vector::const_reverse_iterator j = strides.rbegin(); + int total = itemsize; + for (std::vector::const_reverse_iterator i = shape.rbegin(); i != shape.rend(); ++i, ++j) { + if (total != *j) return false; + total *= (*i); + } + return true; +} + +bool is_f_contiguous(std::vector const &shape, + std::vector const &strides, + int itemsize) { + std::vector::const_iterator j = strides.begin(); + int total = itemsize; + for (std::vector::const_iterator i = shape.begin(); i != shape.end(); ++i, ++j) { + if (total != *j) return false; + total *= (*i); + } + return true; +} + +bool is_aligned(std::vector const &strides, + int itemsize) { + for (std::vector::const_iterator i = strides.begin(); i != strides.end(); ++i) { + if (*i % itemsize) return false; + } + return true; +} + +inline PyArray_Descr *incref_dtype(dtype const &dt) { + Py_INCREF(dt.ptr()); + return reinterpret_cast(dt.ptr()); +} + +ndarray from_data_impl(void *data, + dtype const &dt, + python::object const &shape, + python::object const &strides, + python::object const &owner, + bool writeable) { + std::vector shape_(len(shape)); + std::vector strides_(len(strides)); + if (shape_.size() != strides_.size()) { + PyErr_SetString(PyExc_ValueError, "Length of shape and strides arrays do not match."); + python::throw_error_already_set(); + } + for (std::size_t i = 0; i < shape_.size(); ++i) { + shape_[i] = python::extract(shape[i]); + strides_[i] = python::extract(strides[i]); + } + return from_data_impl(data, dt, shape_, strides_, owner, writeable); +} + +ndarray from_data_impl(void *data, + dtype const &dt, + std::vector const &shape, + std::vector const &strides, + python::object const &owner, + bool writeable) { + if (shape.size() != strides.size()) { + PyErr_SetString(PyExc_ValueError, "Length of shape and strides arrays do not match."); + python::throw_error_already_set(); + } + int itemsize = dt.get_itemsize(); + int flags = 0; + if (writeable) flags |= NPY_WRITEABLE; + if (is_c_contiguous(shape, strides, itemsize)) flags |= NPY_C_CONTIGUOUS; + if (is_f_contiguous(shape, strides, itemsize)) flags |= NPY_F_CONTIGUOUS; + if (is_aligned(strides, itemsize)) flags |= NPY_ALIGNED; + ndarray r(python::detail::new_reference(PyArray_NewFromDescr(&PyArray_Type, + incref_dtype(dt), + shape.size(), + const_cast(&shape.front()), + const_cast(&strides.front()), + data, + flags, + NULL))); r.set_base(owner); return r; } -} // namespace detail +} // namespace detail -ndarray ndarray::view(dtype const & dt) const -{ - return ndarray(python::detail::new_reference - (PyObject_CallMethod(this->ptr(), const_cast("view"), const_cast("O"), dt.ptr()))); +ndarray ndarray::view(dtype const &dt) const { + return ndarray(python::detail::new_reference(PyObject_CallMethod(this->ptr(), const_cast("view"), const_cast("O"), dt.ptr()))); } - -ndarray ndarray::astype(dtype const & dt) const -{ - return ndarray(python::detail::new_reference - (PyObject_CallMethod(this->ptr(), const_cast("astype"), const_cast("O"), dt.ptr()))); + +ndarray ndarray::astype(dtype const &dt) const { + return ndarray(python::detail::new_reference(PyObject_CallMethod(this->ptr(), const_cast("astype"), const_cast("O"), dt.ptr()))); } -ndarray ndarray::copy() const -{ - return 
ndarray(python::detail::new_reference - (PyObject_CallMethod(this->ptr(), const_cast("copy"), const_cast("")))); +ndarray ndarray::copy() const { + return ndarray(python::detail::new_reference(PyObject_CallMethod(this->ptr(), const_cast("copy"), const_cast("")))); } -dtype ndarray::get_dtype() const -{ - return dtype(python::detail::borrowed_reference(get_struct()->descr)); +dtype ndarray::get_dtype() const { + return dtype(python::detail::borrowed_reference(get_struct()->descr)); } -python::object ndarray::get_base() const -{ - if (get_struct()->base == NULL) return object(); - return python::object(python::detail::borrowed_reference(get_struct()->base)); +python::object ndarray::get_base() const { + if (get_struct()->base == NULL) return object(); + return python::object(python::detail::borrowed_reference(get_struct()->base)); } -void ndarray::set_base(object const & base) -{ - Py_XDECREF(get_struct()->base); - if (base != object()) - { - Py_INCREF(base.ptr()); - get_struct()->base = base.ptr(); - } - else - { - get_struct()->base = NULL; - } +void ndarray::set_base(object const &base) { + Py_XDECREF(get_struct()->base); + if (base != object()) { + Py_INCREF(base.ptr()); + get_struct()->base = base.ptr(); + } else { + get_struct()->base = NULL; + } } -ndarray::bitflag const ndarray::get_flags() const -{ - return numpy::detail::numpy_to_bitflag(get_struct()->flags); +ndarray::bitflag const ndarray::get_flags() const { + return numpy::detail::numpy_to_bitflag(get_struct()->flags); } -ndarray ndarray::transpose() const -{ - return ndarray(python::detail::new_reference - (PyArray_Transpose(reinterpret_cast(this->ptr()), NULL))); +ndarray ndarray::transpose() const { + return ndarray(python::detail::new_reference(PyArray_Transpose(reinterpret_cast(this->ptr()), NULL))); } -ndarray ndarray::squeeze() const -{ - return ndarray(python::detail::new_reference - (PyArray_Squeeze(reinterpret_cast(this->ptr())))); +ndarray ndarray::squeeze() const { + return ndarray(python::detail::new_reference(PyArray_Squeeze(reinterpret_cast(this->ptr())))); } -ndarray ndarray::reshape(python::tuple const & shape) const -{ - return ndarray(python::detail::new_reference - (PyArray_Reshape(reinterpret_cast(this->ptr()), shape.ptr()))); +ndarray ndarray::reshape(python::tuple const &shape) const { + return ndarray(python::detail::new_reference(PyArray_Reshape(reinterpret_cast(this->ptr()), shape.ptr()))); } -python::object ndarray::scalarize() const -{ - Py_INCREF(ptr()); - return python::object(python::detail::new_reference(PyArray_Return(reinterpret_cast(ptr())))); +python::object ndarray::scalarize() const { + Py_INCREF(ptr()); + return python::object(python::detail::new_reference(PyArray_Return(reinterpret_cast(ptr())))); } -ndarray zeros(python::tuple const & shape, dtype const & dt) -{ - int nd = len(shape); - boost::scoped_array dims(new Py_intptr_t[nd]); - for (int n=0; n(shape[n]); - return ndarray(python::detail::new_reference - (PyArray_Zeros(nd, dims.get(), detail::incref_dtype(dt), 0))); +ndarray zeros(python::tuple const &shape, dtype const &dt) { + int nd = len(shape); + boost::scoped_array dims(new Py_intptr_t[nd]); + for (int n = 0; n < nd; ++n) + dims[n] = python::extract(shape[n]); + return ndarray(python::detail::new_reference(PyArray_Zeros(nd, dims.get(), detail::incref_dtype(dt), 0))); } -ndarray zeros(int nd, Py_intptr_t const * shape, dtype const & dt) -{ - return ndarray(python::detail::new_reference - (PyArray_Zeros(nd, const_cast(shape), detail::incref_dtype(dt), 0))); +ndarray zeros(int 
nd, Py_intptr_t const *shape, dtype const &dt) { + return ndarray(python::detail::new_reference(PyArray_Zeros(nd, const_cast(shape), detail::incref_dtype(dt), 0))); } -ndarray empty(python::tuple const & shape, dtype const & dt) -{ - int nd = len(shape); - boost::scoped_array dims(new Py_intptr_t[nd]); - for (int n=0; n(shape[n]); - return ndarray(python::detail::new_reference - (PyArray_Empty(nd, dims.get(), detail::incref_dtype(dt), 0))); +ndarray empty(python::tuple const &shape, dtype const &dt) { + int nd = len(shape); + boost::scoped_array dims(new Py_intptr_t[nd]); + for (int n = 0; n < nd; ++n) + dims[n] = python::extract(shape[n]); + return ndarray(python::detail::new_reference(PyArray_Empty(nd, dims.get(), detail::incref_dtype(dt), 0))); } -ndarray empty(int nd, Py_intptr_t const * shape, dtype const & dt) -{ - return ndarray(python::detail::new_reference - (PyArray_Empty(nd, const_cast(shape), detail::incref_dtype(dt), 0))); +ndarray empty(int nd, Py_intptr_t const *shape, dtype const &dt) { + return ndarray(python::detail::new_reference(PyArray_Empty(nd, const_cast(shape), detail::incref_dtype(dt), 0))); } -ndarray array(python::object const & obj) -{ - return ndarray(python::detail::new_reference - (PyArray_FromAny(obj.ptr(), NULL, 0, 0, NPY_ENSUREARRAY, NULL))); +ndarray array(python::object const &obj) { + return ndarray(python::detail::new_reference(PyArray_FromAny(obj.ptr(), NULL, 0, 0, NPY_ENSUREARRAY, NULL))); } -ndarray array(python::object const & obj, dtype const & dt) -{ - return ndarray(python::detail::new_reference - (PyArray_FromAny(obj.ptr(), detail::incref_dtype(dt), 0, 0, NPY_ENSUREARRAY, NULL))); +ndarray array(python::object const &obj, dtype const &dt) { + return ndarray(python::detail::new_reference(PyArray_FromAny(obj.ptr(), detail::incref_dtype(dt), 0, 0, NPY_ENSUREARRAY, NULL))); } -ndarray from_object(python::object const & obj, dtype const & dt, int nd_min, int nd_max, ndarray::bitflag flags) -{ - int requirements = detail::bitflag_to_numpy(flags); - return ndarray(python::detail::new_reference - (PyArray_FromAny(obj.ptr(), - detail::incref_dtype(dt), - nd_min, nd_max, - requirements, - NULL))); +ndarray from_object(python::object const &obj, dtype const &dt, int nd_min, int nd_max, ndarray::bitflag flags) { + int requirements = detail::bitflag_to_numpy(flags); + return ndarray(python::detail::new_reference(PyArray_FromAny(obj.ptr(), + detail::incref_dtype(dt), + nd_min, nd_max, + requirements, + NULL))); } -ndarray from_object(python::object const & obj, int nd_min, int nd_max, ndarray::bitflag flags) -{ - int requirements = detail::bitflag_to_numpy(flags); - return ndarray(python::detail::new_reference - (PyArray_FromAny(obj.ptr(), - NULL, - nd_min, nd_max, - requirements, - NULL))); +ndarray from_object(python::object const &obj, int nd_min, int nd_max, ndarray::bitflag flags) { + int requirements = detail::bitflag_to_numpy(flags); + return ndarray(python::detail::new_reference(PyArray_FromAny(obj.ptr(), + NULL, + nd_min, nd_max, + requirements, + NULL))); } -} // namespace Halide::numpy -} // namespace Halide +} // namespace Halide::numpy +} // namespace Halide diff --git a/python_bindings/numpy/ndarray.hpp b/python_bindings/numpy/ndarray.hpp index 9eddd74199ec..427f56642bd6 100644 --- a/python_bindings/numpy/ndarray.hpp +++ b/python_bindings/numpy/ndarray.hpp @@ -10,18 +10,16 @@ * @brief Object manager and various utilities for numpy.ndarray. 
*/ +#include "dtype.hpp" +#include "numpy_object_mgr_traits.hpp" #include -#include #include -#include "numpy_object_mgr_traits.hpp" -#include "dtype.hpp" +#include #include -namespace Halide -{ -namespace numpy -{ +namespace Halide { +namespace numpy { /** * @brief A boost.python "object manager" (subclass of object) for numpy.ndarray. @@ -29,34 +27,30 @@ namespace numpy * @todo This could have a lot more functionality (like boost::python::numeric::array). * Right now all that exists is what was needed to move raw data between C++ and Python. */ -class ndarray : public python::object -{ +class ndarray : public python::object { - /** + /** * @brief An internal struct that's byte-compatible with PyArrayObject. * * This is just a hack to allow inline access to this stuff while hiding numpy/arrayobject.h * from the user. */ - struct array_struct - { - PyObject_HEAD - char * data; - int nd; - Py_intptr_t * shape; - Py_intptr_t * strides; - PyObject * base; - PyObject * descr; - int flags; - PyObject * weakreflist; - }; - - /// @brief Return the held Python object as an array_struct. - array_struct * get_struct() const { return reinterpret_cast(this->ptr()); } + struct array_struct { + PyObject_HEAD char *data; + int nd; + Py_intptr_t *shape; + Py_intptr_t *strides; + PyObject *base; + PyObject *descr; + int flags; + PyObject *weakreflist; + }; + + /// @brief Return the held Python object as an array_struct. + array_struct *get_struct() const { return reinterpret_cast(this->ptr()); } public: - - /** + /** * @brief Enum to represent (some) of Numpy's internal flags. * * These don't match the actual Numpy flag values; we can't get those without including @@ -66,131 +60,139 @@ class ndarray : public python::object * make these simple integer values for overloading purposes, but the need to * define every possible combination and custom bitwise operators is ugly. */ - enum bitflag - { - NONE=0x0, C_CONTIGUOUS=0x1, F_CONTIGUOUS=0x2, V_CONTIGUOUS=0x1|0x2, - ALIGNED=0x4, WRITEABLE=0x8, BEHAVED=0x4|0x8, - CARRAY_RO=0x1|0x4, CARRAY=0x1|0x4|0x8, CARRAY_MIS=0x1|0x8, - FARRAY_RO=0x2|0x4, FARRAY=0x2|0x4|0x8, FARRAY_MIS=0x2|0x8, - UPDATE_ALL=0x1|0x2|0x4, VARRAY=0x1|0x2|0x8, ALL=0x1|0x2|0x4|0x8 - }; - - BOOST_PYTHON_FORWARD_OBJECT_CONSTRUCTORS(ndarray, object); - - /// @brief Return a view of the scalar with the given dtype. - ndarray view(dtype const & dt) const; - - /// @brief Copy the array, cast to a specified type. - ndarray astype(dtype const & dt) const; - - /// @brief Copy the scalar (deep for all non-object fields). - ndarray copy() const; - - /// @brief Return the size of the nth dimension. - Py_intptr_t const shape(int n) const { return get_shape()[n]; } - - /// @brief Return the stride of the nth dimension. - Py_intptr_t const strides(int n) const { return get_strides()[n]; } - - /** + enum bitflag { + NONE = 0x0, + C_CONTIGUOUS = 0x1, + F_CONTIGUOUS = 0x2, + V_CONTIGUOUS = 0x1 | 0x2, + ALIGNED = 0x4, + WRITEABLE = 0x8, + BEHAVED = 0x4 | 0x8, + CARRAY_RO = 0x1 | 0x4, + CARRAY = 0x1 | 0x4 | 0x8, + CARRAY_MIS = 0x1 | 0x8, + FARRAY_RO = 0x2 | 0x4, + FARRAY = 0x2 | 0x4 | 0x8, + FARRAY_MIS = 0x2 | 0x8, + UPDATE_ALL = 0x1 | 0x2 | 0x4, + VARRAY = 0x1 | 0x2 | 0x8, + ALL = 0x1 | 0x2 | 0x4 | 0x8 + }; + + BOOST_PYTHON_FORWARD_OBJECT_CONSTRUCTORS(ndarray, object); + + /// @brief Return a view of the scalar with the given dtype. + ndarray view(dtype const &dt) const; + + /// @brief Copy the array, cast to a specified type. 
+ ndarray astype(dtype const &dt) const; + + /// @brief Copy the scalar (deep for all non-object fields). + ndarray copy() const; + + /// @brief Return the size of the nth dimension. + Py_intptr_t const shape(int n) const { return get_shape()[n]; } + + /// @brief Return the stride of the nth dimension. + Py_intptr_t const strides(int n) const { return get_strides()[n]; } + + /** * @brief Return the array's raw data pointer. * * This returns char so stride math works properly on it. It's pretty much * expected that the user will have to reinterpret_cast it. */ - char * get_data() const { return get_struct()->data; } - - /// @brief Return the array's data-type descriptor object. - dtype get_dtype() const; - - /// @brief Return the object that owns the array's data, or None if the array owns its own data. - python::object get_base() const; - - /// @brief Set the object that owns the array's data. Use with care. - void set_base(object const & base); - - /// @brief Return the shape of the array as an array of integers (length == get_nd()). - Py_intptr_t const * get_shape() const { return get_struct()->shape; } - - /// @brief Return the stride of the array as an array of integers (length == get_nd()). - Py_intptr_t const * get_strides() const { return get_struct()->strides; } - - /// @brief Return the number of array dimensions. - int const get_nd() const { return get_struct()->nd; } - - /// @brief Return the array flags. - bitflag const get_flags() const; - - /// @brief Reverse the dimensions of the array. - ndarray transpose() const; - - /// @brief Eliminate any unit-sized dimensions. - ndarray squeeze() const; - - /// @brief Equivalent to self.reshape(*shape) in Python. - ndarray reshape(python::tuple const & shape) const; - - /** + char *get_data() const { return get_struct()->data; } + + /// @brief Return the array's data-type descriptor object. + dtype get_dtype() const; + + /// @brief Return the object that owns the array's data, or None if the array owns its own data. + python::object get_base() const; + + /// @brief Set the object that owns the array's data. Use with care. + void set_base(object const &base); + + /// @brief Return the shape of the array as an array of integers (length == get_nd()). + Py_intptr_t const *get_shape() const { return get_struct()->shape; } + + /// @brief Return the stride of the array as an array of integers (length == get_nd()). + Py_intptr_t const *get_strides() const { return get_struct()->strides; } + + /// @brief Return the number of array dimensions. + int const get_nd() const { return get_struct()->nd; } + + /// @brief Return the array flags. + bitflag const get_flags() const; + + /// @brief Reverse the dimensions of the array. + ndarray transpose() const; + + /// @brief Eliminate any unit-sized dimensions. + ndarray squeeze() const; + + /// @brief Equivalent to self.reshape(*shape) in Python. + ndarray reshape(python::tuple const &shape) const; + + /** * @brief If the array contains only a single element, return it as an array scalar; otherwise return * the array. * * @internal This is simply a call to PyArray_Return(); */ - python::object scalarize() const; + python::object scalarize() const; }; /** * @brief Construct a new array with the given shape and data type, with data initialized to zero. 
*/ -ndarray zeros(python::tuple const & shape, dtype const & dt); -ndarray zeros(int nd, Py_intptr_t const * shape, dtype const & dt); +ndarray zeros(python::tuple const &shape, dtype const &dt); +ndarray zeros(int nd, Py_intptr_t const *shape, dtype const &dt); /** * @brief Construct a new array with the given shape and data type, with data left uninitialized. */ -ndarray empty(python::tuple const & shape, dtype const & dt); -ndarray empty(int nd, Py_intptr_t const * shape, dtype const & dt); +ndarray empty(python::tuple const &shape, dtype const &dt); +ndarray empty(int nd, Py_intptr_t const *shape, dtype const &dt); /** * @brief Construct a new array from an arbitrary Python sequence. * * @todo This does't seem to handle ndarray subtypes the same way that "numpy.array" does in Python. */ -ndarray array(python::object const & obj); -ndarray array(python::object const & obj, dtype const & dt); +ndarray array(python::object const &obj); +ndarray array(python::object const &obj, dtype const &dt); -namespace detail -{ +namespace detail { -ndarray from_data_impl(void * data, - dtype const & dt, - std::vector const & shape, - std::vector const & strides, - python::object const & owner, - bool writeable); +ndarray from_data_impl(void *data, + dtype const &dt, + std::vector const &shape, + std::vector const &strides, + python::object const &owner, + bool writeable); template -ndarray from_data_impl(void * data, - dtype const & dt, - Container shape, - Container strides, - python::object const & owner, - bool writeable, - typename boost::enable_if< boost::is_integral >::type * enabled = NULL) -{ - std::vector shape_(shape.begin(),shape.end()); - std::vector strides_(strides.begin(), strides.end()); - return from_data_impl(data, dt, shape_, strides_, owner, writeable); +ndarray from_data_impl(void *data, + dtype const &dt, + Container shape, + Container strides, + python::object const &owner, + bool writeable, + typename boost::enable_if>::type *enabled = NULL) { + std::vector shape_(shape.begin(), shape.end()); + std::vector strides_(strides.begin(), strides.end()); + return from_data_impl(data, dt, shape_, strides_, owner, writeable); } -ndarray from_data_impl(void * data, - dtype const & dt, - python::object const & shape, - python::object const & strides, - python::object const & owner, - bool writeable); +ndarray from_data_impl(void *data, + dtype const &dt, + python::object const &shape, + python::object const &strides, + python::object const &owner, + bool writeable); -} // namespace Halide::numpy::detail +} // namespace Halide::numpy::detail /** * @brief Construct a new ndarray object from a raw pointer. @@ -206,14 +208,13 @@ ndarray from_data_impl(void * data, * @todo Should probably take ranges of iterators rather than actual container objects. */ template -inline ndarray from_data(void * data, - dtype const & dt, - Container shape, - Container strides, - python::object const & owner) -{ - return numpy::detail::from_data_impl(data, dt, shape, strides, owner, true); -} +inline ndarray from_data(void *data, + dtype const &dt, + Container shape, + Container strides, + python::object const &owner) { + return numpy::detail::from_data_impl(data, dt, shape, strides, owner, true); +} /** * @brief Construct a new ndarray object from a raw pointer. @@ -231,14 +232,13 @@ inline ndarray from_data(void * data, * @todo Should probably take ranges of iterators rather than actual container objects. 
*/ template -inline ndarray from_data(void const * data, - dtype const & dt, - Container shape, - Container strides, - python::object const & owner) -{ - return numpy::detail::from_data_impl(const_cast(data), dt, shape, strides, owner, false); -} +inline ndarray from_data(void const *data, + dtype const &dt, + Container shape, + Container strides, + python::object const &owner) { + return numpy::detail::from_data_impl(const_cast(data), dt, shape, strides, owner, false); +} /** * @brief Transform an arbitrary object into a numpy array with the given requirements. @@ -250,57 +250,48 @@ inline ndarray from_data(void const * data, * @param[in] nd_max Maximum number of dimensions. * @param[in] flags Bitwise OR of flags specifying additional requirements. */ -ndarray from_object(python::object const & obj, dtype const & dt, - int nd_min, int nd_max, ndarray::bitflag flags=ndarray::NONE); +ndarray from_object(python::object const &obj, dtype const &dt, + int nd_min, int nd_max, ndarray::bitflag flags = ndarray::NONE); -inline ndarray from_object(python::object const & obj, dtype const & dt, - int nd, ndarray::bitflag flags=ndarray::NONE) -{ - return from_object(obj, dt, nd, nd, flags); +inline ndarray from_object(python::object const &obj, dtype const &dt, + int nd, ndarray::bitflag flags = ndarray::NONE) { + return from_object(obj, dt, nd, nd, flags); } -inline ndarray from_object(python::object const & obj, dtype const & dt, ndarray::bitflag flags=ndarray::NONE) -{ - return from_object(obj, dt, 0, 0, flags); +inline ndarray from_object(python::object const &obj, dtype const &dt, ndarray::bitflag flags = ndarray::NONE) { + return from_object(obj, dt, 0, 0, flags); } -ndarray from_object(python::object const & obj, int nd_min, int nd_max, - ndarray::bitflag flags=ndarray::NONE); +ndarray from_object(python::object const &obj, int nd_min, int nd_max, + ndarray::bitflag flags = ndarray::NONE); -inline ndarray from_object(python::object const & obj, int nd, ndarray::bitflag flags=ndarray::NONE) -{ - return from_object(obj, nd, nd, flags); +inline ndarray from_object(python::object const &obj, int nd, ndarray::bitflag flags = ndarray::NONE) { + return from_object(obj, nd, nd, flags); } -inline ndarray from_object(python::object const & obj, ndarray::bitflag flags=ndarray::NONE) -{ - return from_object(obj, 0, 0, flags); +inline ndarray from_object(python::object const &obj, ndarray::bitflag flags = ndarray::NONE) { + return from_object(obj, 0, 0, flags); } -inline ndarray::bitflag operator|(ndarray::bitflag a, ndarray::bitflag b) -{ - return ndarray::bitflag(int(a) | int(b)); +inline ndarray::bitflag operator|(ndarray::bitflag a, ndarray::bitflag b) { + return ndarray::bitflag(int(a) | int(b)); } -inline ndarray::bitflag operator&(ndarray::bitflag a, ndarray::bitflag b) -{ - return ndarray::bitflag(int(a) & int(b)); +inline ndarray::bitflag operator&(ndarray::bitflag a, ndarray::bitflag b) { + return ndarray::bitflag(int(a) & int(b)); } -} // namespace Halide::numpy -} // namespace Halide +} // namespace Halide::numpy +} // namespace Halide -namespace boost -{ -namespace python -{ -namespace converter -{ +namespace boost { +namespace python { +namespace converter { NUMPY_OBJECT_MANAGER_TRAITS(Halide::numpy::ndarray); -} // namespace boost::python::converter -} // namespace boost::python -} // namespace boost +} // namespace boost::python::converter +} // namespace boost::python +} // namespace boost -#endif // !HALIDE_NUMPY_NDARRAY_HPP_INCLUDED +#endif // !HALIDE_NUMPY_NDARRAY_HPP_INCLUDED diff --git 
a/python_bindings/numpy/numpy.cpp b/python_bindings/numpy/numpy.cpp index c557127f740e..aaaf50fe89ae 100644 --- a/python_bindings/numpy/numpy.cpp +++ b/python_bindings/numpy/numpy.cpp @@ -4,32 +4,28 @@ // http://www.boost.org/LICENSE_1_0.txt) #define HALIDE_NUMPY_INTERNAL_MAIN -#include "internal.hpp" #include "dtype.hpp" +#include "internal.hpp" -namespace Halide -{ -namespace numpy -{ +namespace Halide { +namespace numpy { #if PY_MAJOR_VERSION == 2 static void wrap_import_array() { import_array(); } #else -static void * wrap_import_array() { +static void *wrap_import_array() { import_array(); return nullptr; } #endif -void initialize(bool register_scalar_converters) -{ - wrap_import_array(); - import_ufunc(); - if (register_scalar_converters) - dtype::register_scalar_converters(); +void initialize(bool register_scalar_converters) { + wrap_import_array(); + import_ufunc(); + if (register_scalar_converters) + dtype::register_scalar_converters(); } - } } diff --git a/python_bindings/numpy/numpy.hpp b/python_bindings/numpy/numpy.hpp index 4ac4becda1d1..9128b4e2361b 100644 --- a/python_bindings/numpy/numpy.hpp +++ b/python_bindings/numpy/numpy.hpp @@ -26,9 +26,9 @@ namespace numpy { * and "import_ufunc()", and then calls * dtype::register_scalar_converters(). */ -void initialize(bool register_scalar_converters=true); +void initialize(bool register_scalar_converters = true); -} // namespace Halide::numpy -} // namespace Halide +} // namespace Halide::numpy +} // namespace Halide -#endif // !HALIDE_NUMPY_HPP_INCLUDED +#endif // !HALIDE_NUMPY_HPP_INCLUDED diff --git a/python_bindings/numpy/numpy_object_mgr_traits.hpp b/python_bindings/numpy/numpy_object_mgr_traits.hpp index 711554541054..ee25e7dec76d 100644 --- a/python_bindings/numpy/numpy_object_mgr_traits.hpp +++ b/python_bindings/numpy/numpy_object_mgr_traits.hpp @@ -11,25 +11,20 @@ * source-file implementation of get_pytype(). 
*/ -#define NUMPY_OBJECT_MANAGER_TRAITS(manager) \ -template <> \ -struct object_manager_traits \ -{ \ - BOOST_STATIC_CONSTANT(bool, is_specialized = true); \ - static inline python::detail::new_reference adopt(PyObject* x) \ - { \ - return python::detail::new_reference(python::pytype_check((PyTypeObject*)get_pytype(), x)); \ - } \ - static bool check(PyObject* x) \ - { \ - return ::PyObject_IsInstance(x, (PyObject*)get_pytype()); \ - } \ - static manager* checked_downcast(PyObject* x) \ - { \ - return python::downcast((checked_downcast_impl)(x, (PyTypeObject*)get_pytype())); \ - } \ - static PyTypeObject const * get_pytype(); \ -} - -#endif // !HALIDE_NUMPY_NUMPY_OBJECT_MGR_TRAITS_HPP_INCLUDED +#define NUMPY_OBJECT_MANAGER_TRAITS(manager) \ + template <> \ + struct object_manager_traits { \ + BOOST_STATIC_CONSTANT(bool, is_specialized = true); \ + static inline python::detail::new_reference adopt(PyObject *x) { \ + return python::detail::new_reference(python::pytype_check((PyTypeObject *)get_pytype(), x)); \ + } \ + static bool check(PyObject *x) { \ + return ::PyObject_IsInstance(x, (PyObject *)get_pytype()); \ + } \ + static manager *checked_downcast(PyObject *x) { \ + return python::downcast((checked_downcast_impl)(x, (PyTypeObject *)get_pytype())); \ + } \ + static PyTypeObject const *get_pytype(); \ + } +#endif // !HALIDE_NUMPY_NUMPY_OBJECT_MGR_TRAITS_HPP_INCLUDED diff --git a/python_bindings/numpy/readme.text b/python_bindings/numpy/readme.text index f9c0bf5f3ed5..4397a270b861 100644 --- a/python_bindings/numpy/readme.text +++ b/python_bindings/numpy/readme.text @@ -1,5 +1,6 @@ This a boiled down version of -https://github.com/ndarray/Boost.NumPy + https : //github.com/ndarray/Boost.NumPy -many files are removed, and namespace changed from boost::numpy to -Halide::numpy to avoid clashes. \ No newline at end of file + many files are removed, + and namespace changed from boost::numpy to + Halide::numpy to avoid clashes. \ No newline at end of file diff --git a/python_bindings/python/Argument.cpp b/python_bindings/python/Argument.cpp index 9a1d895ba3cb..f0b146345800 100644 --- a/python_bindings/python/Argument.cpp +++ b/python_bindings/python/Argument.cpp @@ -9,82 +9,76 @@ namespace h = Halide; -void defineArgument() -{ +void defineArgument() { using Halide::Argument; namespace p = boost::python; auto argument_class = - p::class_("Argument", - "A struct representing an argument to a halide-generated function. " - "Used for specifying the function signature of generated code.", - p::init<>(p::arg("self"))); + p::class_("Argument", + "A struct representing an argument to a halide-generated function. 
" + "Used for specifying the function signature of generated code.", + p::init<>(p::arg("self"))); argument_class - .def(p::init( - (p::arg("self"), p::arg("name"), p::arg("kind"), p::arg("type"), p::arg("dimensions"), - p::arg("default"), p::arg("min"), p::arg("max")))) - .def(p::init( - (p::arg("self"), p::arg("name"), p::arg("kind"), p::arg("type"), p::arg("dimensions"), - p::arg("default")))) - .def(p::init( - (p::arg("self"), p::arg("name"), p::arg("kind"), p::arg("type"), p::arg("dimensions")))) - ; + .def(p::init( + (p::arg("self"), p::arg("name"), p::arg("kind"), p::arg("type"), p::arg("dimensions"), + p::arg("default"), p::arg("min"), p::arg("max")))) + .def(p::init( + (p::arg("self"), p::arg("name"), p::arg("kind"), p::arg("type"), p::arg("dimensions"), + p::arg("default")))) + .def(p::init( + (p::arg("self"), p::arg("name"), p::arg("kind"), p::arg("type"), p::arg("dimensions")))); argument_class - .def_readonly("name", &Argument::name, "The name of the argument."); + .def_readonly("name", &Argument::name, "The name of the argument."); //.property("name", &Argument::name, "The name of the argument.") //.def("name", // &argument_name, // getter instead of property to be consistent with other parts of the API // "The name of the argument."); p::enum_("ArgumentKind") - .value("InputScalar", Argument::Kind::InputScalar) - .value("InputBuffer", Argument::Kind::InputBuffer) - .value("OutputBuffer", Argument::Kind::OutputBuffer) - .export_values() - ; - + .value("InputScalar", Argument::Kind::InputScalar) + .value("InputBuffer", Argument::Kind::InputBuffer) + .value("OutputBuffer", Argument::Kind::OutputBuffer) + .export_values(); argument_class - //.def("kind", &argument_kind, - .def_readonly("kind", &Argument::kind, - //.def("kind", [](Argument &that) -> Argument::Kind { return that.kind; }, - //.def("kind", std::function( [](Argument &that) { return that.kind; } ), - "An argument is either a primitive type (for parameters), or a buffer pointer.\n" - "If kind == InputScalar, then type fully encodes the expected type of the scalar argument." - "If kind == InputBuffer|OutputBuffer, then type.bytes() should be used " - "to determine* elem_size of the buffer; additionally, type.code *should* " - "reflect the expected interpretation of the buffer data (e.g. float vs int), " - "but there is no runtime enforcement of this at present."); + //.def("kind", &argument_kind, + .def_readonly("kind", &Argument::kind, + //.def("kind", [](Argument &that) -> Argument::Kind { return that.kind; }, + //.def("kind", std::function( [](Argument &that) { return that.kind; } ), + "An argument is either a primitive type (for parameters), or a buffer pointer.\n" + "If kind == InputScalar, then type fully encodes the expected type of the scalar argument." + "If kind == InputBuffer|OutputBuffer, then type.bytes() should be used " + "to determine* elem_size of the buffer; additionally, type.code *should* " + "reflect the expected interpretation of the buffer data (e.g. float vs int), " + "but there is no runtime enforcement of this at present."); argument_class - .def_readonly("dimensions", &Argument::dimensions, - "If kind == InputBuffer|OutputBuffer, this is the dimensionality of the buffer. " - "If kind == InputScalar, this value is ignored (and should always be set to zero)"); + .def_readonly("dimensions", &Argument::dimensions, + "If kind == InputBuffer|OutputBuffer, this is the dimensionality of the buffer. 
" + "If kind == InputScalar, this value is ignored (and should always be set to zero)"); argument_class - .def_readonly("type", &Argument::type, - "If this is a scalar parameter, then this is its type. " - " If this is a buffer parameter, this is used to determine elem_size of the buffer_t. " - "Note that type.width should always be 1 here."); - + .def_readonly("type", &Argument::type, + "If this is a scalar parameter, then this is its type. " + " If this is a buffer parameter, this is used to determine elem_size of the buffer_t. " + "Note that type.width should always be 1 here."); argument_class - .def_readonly("default", &Argument::def, - "If this is a scalar parameter, then these are its default, min, max values. " - "By default, they are left unset, implying \"no default, no min, no max\". ") - .def_readonly("min", &Argument::min) - .def_readonly("max", &Argument::max); - + .def_readonly("default", &Argument::def, + "If this is a scalar parameter, then these are its default, min, max values. " + "By default, they are left unset, implying \"no default, no min, no max\". ") + .def_readonly("min", &Argument::min) + .def_readonly("max", &Argument::max); argument_class - .def("is_buffer", &Argument::is_buffer, p::arg("self"), - "An argument is either a primitive type (for parameters), or a buffer pointer. " - "If 'is_buffer' is true, then 'type' should be ignored.") - .def("is_scalar", &Argument::is_scalar, p::arg("self")) - .def("is_input", &Argument::is_input, p::arg("self")) - .def("is_output", &Argument::is_output, p::arg("self")); + .def("is_buffer", &Argument::is_buffer, p::arg("self"), + "An argument is either a primitive type (for parameters), or a buffer pointer. " + "If 'is_buffer' is true, then 'type' should be ignored.") + .def("is_scalar", &Argument::is_scalar, p::arg("self")) + .def("is_input", &Argument::is_input, p::arg("self")) + .def("is_output", &Argument::is_output, p::arg("self")); return; } diff --git a/python_bindings/python/Argument.h b/python_bindings/python/Argument.h index 62fde80b78a3..bde8d81d4a8c 100644 --- a/python_bindings/python/Argument.h +++ b/python_bindings/python/Argument.h @@ -3,4 +3,4 @@ void defineArgument(); -#endif // ARGUMENT_H +#endif // ARGUMENT_H diff --git a/python_bindings/python/BoundaryConditions.cpp b/python_bindings/python/BoundaryConditions.cpp index 64d97f8d3cc4..70d4ef45aa36 100644 --- a/python_bindings/python/BoundaryConditions.cpp +++ b/python_bindings/python/BoundaryConditions.cpp @@ -4,31 +4,30 @@ #include #include -#include "../../src/Lambda.h" // needed by BoundaryConditions.h -#include "../../src/ImageParam.h" #include "../../src/BoundaryConditions.h" #include "../../src/Func.h" +#include "../../src/ImageParam.h" +#include "../../src/Lambda.h" // needed by BoundaryConditions.h +#include #include #include -#include namespace h = Halide; namespace hb = Halide::BoundaryConditions; namespace p = boost::python; -template inline std::pair to_pair(const p::object& iterable) -{ +template +inline std::pair to_pair(const p::object &iterable) { return std::pair(p::extract(iterable[0]), p::extract(iterable[1])); } -template inline std::vector to_vector(const p::object& iterable) -{ +template +inline std::vector to_vector(const p::object &iterable) { return std::vector(p::stl_input_iterator(iterable), p::stl_input_iterator()); } -std::vector> inline pyobject_to_bounds(const p::object& pybounds) -{ +std::vector> inline pyobject_to_bounds(const p::object &pybounds) { std::vector intermediate = to_vector(pybounds); std::vector> 
result(intermediate.size()); std::transform(intermediate.begin(), intermediate.end(), result.begin(), to_pair); @@ -37,158 +36,135 @@ std::vector> inline pyobject_to_bounds(const p::obje namespace { -template -h::Func constant_exterior0(T func_like, h::Expr value) -{ +template +h::Func constant_exterior0(T func_like, h::Expr value) { return hb::constant_exterior(func_like, value); } -h::Func constant_exterior_bounds(h::Func func, h::Expr value, p::object bounds_) -{ +h::Func constant_exterior_bounds(h::Func func, h::Expr value, p::object bounds_) { return hb::constant_exterior(func, value, pyobject_to_bounds(bounds_)); } // C++ fun, variadic template recursive function ! -template -void def_constant_exterior_for_image() -{ +template +void def_constant_exterior_for_image() { p::def("constant_exterior", &constant_exterior0>, p::args("source", "value")); - def_constant_exterior_for_image(); // recursive call + def_constant_exterior_for_image(); // recursive call return; } -template<> -void def_constant_exterior_for_image() -{ // end of recursion +template <> +void def_constant_exterior_for_image() { // end of recursion return; } -} // end of anonymous namespace +} // end of anonymous namespace namespace { - -template -h::Func repeat_edge0(T func_like) -{ +template +h::Func repeat_edge0(T func_like) { return hb::repeat_edge(func_like); } -h::Func repeat_edge_bounds(h::Func func, p::object bounds_) -{ +h::Func repeat_edge_bounds(h::Func func, p::object bounds_) { return hb::repeat_edge(func, pyobject_to_bounds(bounds_)); } // C++ fun, variadic template recursive function ! -template -void def_repeat_edge_for_image() -{ +template +void def_repeat_edge_for_image() { p::def("repeat_edge", &repeat_edge0>, p::args("source")); - def_repeat_edge_for_image(); // recursive call + def_repeat_edge_for_image(); // recursive call return; } -template<> -void def_repeat_edge_for_image() -{ // end of recursion +template <> +void def_repeat_edge_for_image() { // end of recursion return; } -} // end of anonymous namespace +} // end of anonymous namespace namespace { -template -h::Func repeat_image0(T func_like) -{ +template +h::Func repeat_image0(T func_like) { return hb::repeat_image(func_like); } -h::Func repeat_image_bounds(h::Func func, p::object bounds_) -{ +h::Func repeat_image_bounds(h::Func func, p::object bounds_) { return hb::repeat_image(func, pyobject_to_bounds(bounds_)); } // C++ fun, variadic template recursive function ! -template -void def_repeat_image_for_image() -{ +template +void def_repeat_image_for_image() { p::def("repeat_image", &repeat_image0>, p::args("source")); - def_repeat_image_for_image(); // recursive call + def_repeat_image_for_image(); // recursive call return; } -template<> -void def_repeat_image_for_image() -{ // end of recursion +template <> +void def_repeat_image_for_image() { // end of recursion return; } -} // end of anonymous namespace +} // end of anonymous namespace namespace { - -template -h::Func mirror_image0(T func_like) -{ +template +h::Func mirror_image0(T func_like) { return hb::mirror_image(func_like); } -h::Func mirror_image_bounds(h::Func func, p::object bounds_) -{ +h::Func mirror_image_bounds(h::Func func, p::object bounds_) { return hb::mirror_image(func, pyobject_to_bounds(bounds_)); } // C++ fun, variadic template recursive function ! 
-template -void def_mirror_image_for_image() -{ +template +void def_mirror_image_for_image() { p::def("mirror_image", &mirror_image0>, p::args("source")); - def_mirror_image_for_image(); // recursive call + def_mirror_image_for_image(); // recursive call return; } -template<> -void def_mirror_image_for_image() -{ // end of recursion +template <> +void def_mirror_image_for_image() { // end of recursion return; } -} // end of anonymous namespace +} // end of anonymous namespace namespace { -template -h::Func mirror_interior0(T func_like) -{ +template +h::Func mirror_interior0(T func_like) { return hb::mirror_interior(func_like); } -h::Func mirror_interior_bounds(h::Func func, p::object bounds_) -{ +h::Func mirror_interior_bounds(h::Func func, p::object bounds_) { return hb::mirror_interior(func, pyobject_to_bounds(bounds_)); } // C++ fun, variadic template recursive function ! -template -void def_mirror_interior_for_image() -{ +template +void def_mirror_interior_for_image() { p::def("mirror_interior", &mirror_interior0>, p::args("source")); - def_mirror_interior_for_image(); // recursive call + def_mirror_interior_for_image(); // recursive call return; } -template<> -void def_mirror_interior_for_image() -{ // end of recursion +template <> +void def_mirror_interior_for_image() { // end of recursion return; } -} // end of anonymous namespace +} // end of anonymous namespace -void defineBoundaryConditions() -{ +void defineBoundaryConditions() { // constant_exterior p::def("constant_exterior", &constant_exterior0, p::args("source", "value"), @@ -203,13 +179,12 @@ void defineBoundaryConditions() " and putting value in the border of the texture.) "); def_constant_exterior_for_image< - boost::uint8_t, boost::uint16_t, boost::uint32_t, - boost::int8_t, boost::int16_t, boost::int32_t, - float, double >(); + boost::uint8_t, boost::uint16_t, boost::uint32_t, + boost::int8_t, boost::int16_t, boost::int32_t, + float, double>(); p::def("constant_exterior", &constant_exterior_bounds, p::args("source", "value", "bounds")); - // repeat_edge p::def("repeat_edge", &repeat_edge0, p::args("source"), @@ -221,13 +196,12 @@ void defineBoundaryConditions() "(This is similar to setting GL_TEXTURE_WRAP_* to GL_CLAMP_TO_EDGE.)"); def_repeat_edge_for_image< - boost::uint8_t, boost::uint16_t, boost::uint32_t, - boost::int8_t, boost::int16_t, boost::int32_t, - float, double >(); + boost::uint8_t, boost::uint16_t, boost::uint32_t, + boost::int8_t, boost::int16_t, boost::int32_t, + float, double>(); p::def("repeat_edge", &repeat_edge_bounds, p::args("source", "bounds")); - // repeat_image p::def("repeat_image", &repeat_image0, p::args("source"), @@ -239,13 +213,12 @@ void defineBoundaryConditions() "(This is similar to setting GL_TEXTURE_WRAP_* to GL_REPEAT.)"); def_repeat_image_for_image< - boost::uint8_t, boost::uint16_t, boost::uint32_t, - boost::int8_t, boost::int16_t, boost::int32_t, - float, double >(); + boost::uint8_t, boost::uint16_t, boost::uint32_t, + boost::int8_t, boost::int16_t, boost::int32_t, + float, double>(); p::def("repeat_image", &repeat_image_bounds, p::args("source", "bounds")); - // mirror_image p::def("mirror_image", &mirror_image0, p::args("source"), @@ -258,13 +231,12 @@ void defineBoundaryConditions() "(This is similar to setting GL_TEXTURE_WRAP_* to GL_MIRRORED_REPEAT.)"); def_mirror_image_for_image< - boost::uint8_t, boost::uint16_t, boost::uint32_t, - boost::int8_t, boost::int16_t, boost::int32_t, - float, double >(); + boost::uint8_t, boost::uint16_t, boost::uint32_t, + boost::int8_t, 
boost::int16_t, boost::int32_t, + float, double>(); p::def("mirror_image", &mirror_image_bounds, p::args("source", "bounds")); - // mirror_interior p::def("mirror_interior", &mirror_interior0, p::args("source"), @@ -278,9 +250,9 @@ void defineBoundaryConditions() "(I do not believe there is a direct GL_TEXTURE_WRAP_* equivalent for this.)"); def_mirror_interior_for_image< - boost::uint8_t, boost::uint16_t, boost::uint32_t, - boost::int8_t, boost::int16_t, boost::int32_t, - float, double >(); + boost::uint8_t, boost::uint16_t, boost::uint32_t, + boost::int8_t, boost::int16_t, boost::int32_t, + float, double>(); p::def("mirror_interior", &mirror_interior_bounds, p::args("source", "bounds")); diff --git a/python_bindings/python/BoundaryConditions.h b/python_bindings/python/BoundaryConditions.h index c41168a9d643..9377b526dde0 100644 --- a/python_bindings/python/BoundaryConditions.h +++ b/python_bindings/python/BoundaryConditions.h @@ -1,8 +1,6 @@ #ifndef BOUNDARYCONDITIONS_H #define BOUNDARYCONDITIONS_H - void defineBoundaryConditions(); - -#endif // BOUNDARYCONDITIONS_H +#endif // BOUNDARYCONDITIONS_H diff --git a/python_bindings/python/Error.cpp b/python_bindings/python/Error.cpp index bf286ba51853..5354ba721469 100644 --- a/python_bindings/python/Error.cpp +++ b/python_bindings/python/Error.cpp @@ -10,43 +10,35 @@ namespace h = Halide; namespace p = boost::python; - - -void translate_error(h::Error const& e) -{ +void translate_error(h::Error const &e) { // Use the Python 'C' API to set up an exception object PyErr_SetString(PyExc_RuntimeError, (std::string("Halide Error: ") + e.what()).c_str()); return; } -void translate_runtime_error(h::RuntimeError const& e) -{ +void translate_runtime_error(h::RuntimeError const &e) { // Use the Python 'C' API to set up an exception object PyErr_SetString(PyExc_RuntimeError, (std::string("Halide RuntimeError: ") + e.what()).c_str()); return; } -void translate_compile_error(h::CompileError const& e) -{ +void translate_compile_error(h::CompileError const &e) { // Use the Python 'C' API to set up an exception object PyErr_SetString(PyExc_RuntimeError, (std::string("Halide CompileError: ") + e.what()).c_str()); return; } -void translate_internal_error(h::InternalError const& e) -{ +void translate_internal_error(h::InternalError const &e) { // Use the Python 'C' API to set up an exception object PyErr_SetString(PyExc_RuntimeError, (std::string("Halide InternalError: ") + e.what()).c_str()); return; } - -void defineError() -{ +void defineError() { // Might create linking problems, if Param.cpp is not included in the python library p::register_exception_translator(&translate_error); diff --git a/python_bindings/python/Error.h b/python_bindings/python/Error.h index 8f692436bfe8..d5200552f2d0 100644 --- a/python_bindings/python/Error.h +++ b/python_bindings/python/Error.h @@ -3,4 +3,4 @@ void defineError(); -#endif // ERROR_H +#endif // ERROR_H diff --git a/python_bindings/python/Expr.cpp b/python_bindings/python/Expr.cpp index aa97748e0bed..5cf08c801ffa 100644 --- a/python_bindings/python/Expr.cpp +++ b/python_bindings/python/Expr.cpp @@ -2,11 +2,12 @@ // to avoid compiler confusion, python.hpp must be include before Halide headers #include + #include "add_operators.h" #include "../../src/Expr.h" -#include "../../src/Var.h" #include "../../src/IROperator.h" +#include "../../src/Var.h" #include "Type.h" @@ -31,7 +32,7 @@ p::object expr_vector_to_python_tuple(const std::vector &t) { std::vector python_tuple_to_expr_vector(const p::object &obj) { p::extract 
expr_extract(obj); if (expr_extract.check()) { - return {expr_extract()}; + return { expr_extract() }; } else { return python_collection_to_vector(obj); } @@ -53,42 +54,41 @@ h::Expr *expr_from_var_constructor(h::Var &var) { void defineExpr() { using Halide::Expr; - auto expr_class = p::class_("Expr", - "An expression or fragment of Halide code.\n" \ - "One can explicitly coerce most types to Expr via the Expr(x) constructor." \ - "The following operators are implemented over Expr, and also other types" \ - "such as Image, Func, Var, RVar generally coerce to Expr when used in arithmetic::\n\n" \ - "+ - * / % ** & |\n" \ - "-(unary) ~(unary)\n" \ - " < <= == != > >=\n" \ - "+= -= *= /=\n" \ - "The following math global functions are also available::\n" \ - "Unary:\n" \ - " abs acos acosh asin asinh atan atanh ceil cos cosh exp\n" \ - " fast_exp fast_log floor log round sin sinh sqrt tan tanh\n" \ - "Binary:\n" \ - " hypot fast_pow max min pow\n\n" \ - "Ternary:\n" \ - " clamp(x, lo, hi) -- Clamp expression to [lo, hi]\n" \ + "An expression or fragment of Halide code.\n" + "One can explicitly coerce most types to Expr via the Expr(x) constructor." + "The following operators are implemented over Expr, and also other types" + "such as Image, Func, Var, RVar generally coerce to Expr when used in arithmetic::\n\n" + "+ - * / % ** & |\n" + "-(unary) ~(unary)\n" + " < <= == != > >=\n" + "+= -= *= /=\n" + "The following math global functions are also available::\n" + "Unary:\n" + " abs acos acosh asin asinh atan atanh ceil cos cosh exp\n" + " fast_exp fast_log floor log round sin sinh sqrt tan tanh\n" + "Binary:\n" + " hypot fast_pow max min pow\n\n" + "Ternary:\n" + " clamp(x, lo, hi) -- Clamp expression to [lo, hi]\n" " select(cond, if_true, if_false) -- Return if_true if cond else if_false\n") - // constructor priority order is reverse from implicitly_convertible - // it important to declare int after float, after double. - .def(p::init(p::arg("self"))) - .def(p::init(p::arg("self"), "Make an expression representing a const 32-bit float double. " - "Also emits a warning due to truncation.")) - .def(p::init(p::arg("self"), "Make an expression representing a const 32-bit float (i.e. a FloatImm)")) - .def(p::init(p::arg("self"), "Make an expression representing a const 32-bit int (i.e. an IntImm)")) - .def(p::init(p::arg("self"), "Make an expression representing a const string (i.e. a StringImm)")) - .def("__init__", - p::make_constructor(&expr_from_var_constructor, p::default_call_policies(), - p::arg("var")), "Cast a Var into an Expr") - - - .def("type", &Expr::type, p::arg("self"), - "Get the type of this expression") - .def("__repr__", &expr_repr, p::arg("self")); + // constructor priority order is reverse from implicitly_convertible + // it important to declare int after float, after double. + .def(p::init(p::arg("self"))) + .def(p::init(p::arg("self"), "Make an expression representing a const 32-bit float double. " + "Also emits a warning due to truncation.")) + .def(p::init(p::arg("self"), "Make an expression representing a const 32-bit float (i.e. a FloatImm)")) + .def(p::init(p::arg("self"), "Make an expression representing a const 32-bit int (i.e. an IntImm)")) + .def(p::init(p::arg("self"), "Make an expression representing a const string (i.e. 
a StringImm)")) + .def("__init__", + p::make_constructor(&expr_from_var_constructor, p::default_call_policies(), + p::arg("var")), + "Cast a Var into an Expr") + + .def("type", &Expr::type, p::arg("self"), + "Get the type of this expression") + .def("__repr__", &expr_repr, p::arg("self")); ; add_operators(expr_class); @@ -102,16 +102,15 @@ void defineExpr() { p::enum_("DeviceAPI", "An enum describing a type of device API. " "Used by schedules, and in the For loop IR node.") - /// Used to denote for loops that inherit their device from where they are used, generally the default - .value("None", h::DeviceAPI::None) - .value("Host", h::DeviceAPI::Host) - .value("Default_GPU", h::DeviceAPI::Default_GPU) - .value("CUDA", h::DeviceAPI::CUDA) - .value("OpenCL", h::DeviceAPI::OpenCL) - .value("GLSL", h::DeviceAPI::GLSL) - .value("Renderscript", h::DeviceAPI::Renderscript) - .export_values() - ; + /// Used to denote for loops that inherit their device from where they are used, generally the default + .value("None", h::DeviceAPI::None) + .value("Host", h::DeviceAPI::Host) + .value("Default_GPU", h::DeviceAPI::Default_GPU) + .value("CUDA", h::DeviceAPI::CUDA) + .value("OpenCL", h::DeviceAPI::OpenCL) + .value("GLSL", h::DeviceAPI::GLSL) + .value("Renderscript", h::DeviceAPI::Renderscript) + .export_values(); return; } diff --git a/python_bindings/python/Expr.h b/python_bindings/python/Expr.h index a0494bd2d728..1727ed0e7630 100644 --- a/python_bindings/python/Expr.h +++ b/python_bindings/python/Expr.h @@ -1,16 +1,16 @@ #ifndef EXPR_H #define EXPR_H -#include #include #include "../../src/Expr.h" +#include void defineExpr(); boost::python::object expr_vector_to_python_tuple(const std::vector &t); std::vector python_tuple_to_expr_vector(const boost::python::object &obj); -template +template std::vector python_collection_to_vector(const boost::python::object &obj) { std::vector result; for (ssize_t i = 0; i < boost::python::len(obj); i++) { @@ -19,4 +19,4 @@ std::vector python_collection_to_vector(const boost::python::object &obj) { return result; } -#endif // EXPR_H +#endif // EXPR_H diff --git a/python_bindings/python/Func.cpp b/python_bindings/python/Func.cpp index 931056db3cc6..0fd39507e895 100644 --- a/python_bindings/python/Func.cpp +++ b/python_bindings/python/Func.cpp @@ -1,22 +1,22 @@ #include "Func.h" // to avoid compiler confusion, python.hpp must be include before Halide headers -#include #include "add_operators.h" +#include #include "../../src/Func.h" #include "Image.h" #include +#include "Expr.h" #include "Func_Ref.h" #include "Func_Stage.h" #include "Func_VarOrRVar.h" #include "Func_gpu.h" -#include "Expr.h" -#include #include +#include namespace h = Halide; namespace p = boost::python; @@ -47,17 +47,17 @@ h::Realization python_object_to_realization(p::object obj) { return h::Realization(images); } -template +template p::object func_realize(h::Func &f, Args... args) { return realization_to_python_object(f.realize(args...)); } -template +template void func_realize_into(h::Func &f, Args... args) { f.realize(args...); } -template +template void func_realize_tuple(h::Func &f, p::tuple obj, Args... 
args) { f.realize(python_object_to_realization(obj), args...); } @@ -80,7 +80,6 @@ void func_compile_to_bitcode0(h::Func &that, const std::string &filename, that.compile_to_bitcode(filename, args_vec, fn_name, target); } - BOOST_PYTHON_FUNCTION_OVERLOADS(func_compile_to_bitcode0_overloads, func_compile_to_bitcode0, 3, 5) void func_compile_to_object0(h::Func &that, const std::string &filename, @@ -91,7 +90,6 @@ void func_compile_to_object0(h::Func &that, const std::string &filename, that.compile_to_object(filename, args_vec, fn_name, target); } - BOOST_PYTHON_FUNCTION_OVERLOADS(func_compile_to_object0_overloads, func_compile_to_object0, 3, 5) void func_compile_to_header0(h::Func &that, const std::string &filename, @@ -102,7 +100,6 @@ void func_compile_to_header0(h::Func &that, const std::string &filename, that.compile_to_header(filename, args_vec, fn_name, target); } - BOOST_PYTHON_FUNCTION_OVERLOADS(func_compile_to_header0_overloads, func_compile_to_header0, 3, 5) void func_compile_to_assembly0(h::Func &that, const std::string &filename, @@ -113,7 +110,6 @@ void func_compile_to_assembly0(h::Func &that, const std::string &filename, that.compile_to_assembly(filename, args_vec, fn_name, target); } - BOOST_PYTHON_FUNCTION_OVERLOADS(func_compile_to_assembly0_overloads, func_compile_to_assembly0, 3, 5) void func_compile_to_c0(h::Func &that, const std::string &filename, @@ -126,7 +122,6 @@ void func_compile_to_c0(h::Func &that, const std::string &filename, BOOST_PYTHON_FUNCTION_OVERLOADS(func_compile_to_c0_overloads, func_compile_to_c0, 3, 5) - void func_compile_to_file0(h::Func &that, const std::string &filename_prefix, p::list args, const h::Target &target = h::get_target_from_environment()) { @@ -136,7 +131,6 @@ void func_compile_to_file0(h::Func &that, const std::string &filename_prefix, BOOST_PYTHON_FUNCTION_OVERLOADS(func_compile_to_file0_overloads, func_compile_to_file0, 3, 4) - void func_compile_to_lowered_stmt0(h::Func &that, const std::string &filename, p::list args, @@ -173,7 +167,7 @@ h::FuncRef func_getitem_operator(h::Func &func, p::object arg) { h::Stage func_setitem_operator(h::Func &func, p::object lhs, p::object rhs) { return (func(python_tuple_to_expr_vector(lhs)) = - h::Tuple(python_tuple_to_expr_vector(rhs))); + h::Tuple(python_tuple_to_expr_vector(rhs))); } std::string func_repr(const h::Func &func) { @@ -183,7 +177,6 @@ std::string func_repr(const h::Func &func) { return repr; } - void func_define_extern0(h::Func &that, const std::string &function_name, p::list params, @@ -199,7 +192,7 @@ void func_define_extern1(h::Func &that, p::list types, int dimensionality) { auto params_vec = python_collection_to_vector(params); - auto types_vec = python_collection_to_vector(types); + auto types_vec = python_collection_to_vector(types); return that.define_extern(function_name, params_vec, types_vec, dimensionality); } @@ -211,29 +204,27 @@ p::tuple func_output_types(h::Func &func) { return p::tuple(elts); } - void defineFunc() { using Halide::Func; using namespace func_and_stage_implementation_details; p::enum_("StmtOutputFormat") - .value("Text", h::StmtOutputFormat::Text) - .value("HTML", h::StmtOutputFormat::HTML) - .export_values() - ; + .value("Text", h::StmtOutputFormat::Text) + .value("HTML", h::StmtOutputFormat::HTML) + .export_values(); auto func_class = p::class_("Func", - "A halide function. This class represents one stage in a Halide" \ - "pipeline, and is the unit by which we schedule things. 
By default" \ - "they are aggressively inlined, so you are encouraged to make lots" \ - "of little functions, rather than storing things in Exprs.\n" \ - "Constructors::\n\n" \ - " Func() -- Declare a new undefined function with an automatically-generated unique name\n" \ - " Func(expr) -- Declare a new function with an automatically-generated unique\n" \ - " name, and define it to return the given expression (which may\n" \ - " not contain free variables).\n" \ + "A halide function. This class represents one stage in a Halide" + "pipeline, and is the unit by which we schedule things. By default" + "they are aggressively inlined, so you are encouraged to make lots" + "of little functions, rather than storing things in Exprs.\n" + "Constructors::\n\n" + " Func() -- Declare a new undefined function with an automatically-generated unique name\n" + " Func(expr) -- Declare a new function with an automatically-generated unique\n" + " name, and define it to return the given expression (which may\n" + " not contain free variables).\n" " Func(name) -- Declare a new undefined function with the given name", p::init<>(p::arg("self"))) .def(p::init(p::arg("self"))) @@ -351,33 +342,33 @@ void defineFunc() { "as this halide function.")); func_class.def("compile_to_object", &func_compile_to_object0, - func_compile_to_object0_overloads( - p::args("self", "filename", "args", "fn_name", "target"), - "Statically compile this function to an object file, with the " - "given filename (which should probably end in .o or .obj), type " - "signature, and C function name (which defaults to the same name " - "as this halide function. You probably don't want to use this " - "directly; call compile_to_file instead.")); + func_compile_to_object0_overloads( + p::args("self", "filename", "args", "fn_name", "target"), + "Statically compile this function to an object file, with the " + "given filename (which should probably end in .o or .obj), type " + "signature, and C function name (which defaults to the same name " + "as this halide function. You probably don't want to use this " + "directly; call compile_to_file instead.")); func_class.def("compile_to_header", &func_compile_to_header0, - func_compile_to_header0_overloads( - p::args("self", "filename", "args", "fn_name", "target"), - "Emit a header file with the given filename for this " - "function. The header will define a function with the type " - "signature given by the second argument, and a name given by the " - "third. The name defaults to the same name as this halide " - "function. You don't actually have to have defined this function " - "yet to call this. You probably don't want to use this directly; " - "call compile_to_file instead. ")); - - func_class.def("compile_to_assembly", &func_compile_to_assembly0, - func_compile_to_assembly0_overloads( - p::args("self", "filename", "args", "fn_name", "target"), - " Statically compile this function to text assembly equivalent " - " to the object file generated by compile_to_object. This is " - " useful for checking what Halide is producing without having to " - " disassemble anything, or if you need to feed the assembly into " - " some custom toolchain to produce an object file (e.g. iOS) ")); + func_compile_to_header0_overloads( + p::args("self", "filename", "args", "fn_name", "target"), + "Emit a header file with the given filename for this " + "function. The header will define a function with the type " + "signature given by the second argument, and a name given by the " + "third. 
The name defaults to the same name as this halide " + "function. You don't actually have to have defined this function " + "yet to call this. You probably don't want to use this directly; " + "call compile_to_file instead. ")); + + func_class.def("compile_to_assembly", &func_compile_to_assembly0, + func_compile_to_assembly0_overloads( + p::args("self", "filename", "args", "fn_name", "target"), + " Statically compile this function to text assembly equivalent " + " to the object file generated by compile_to_object. This is " + " useful for checking what Halide is producing without having to " + " disassemble anything, or if you need to feed the assembly into " + " some custom toolchain to produce an object file (e.g. iOS) ")); func_class.def("compile_to_c", &func_compile_to_c0, func_compile_to_c0_overloads( @@ -401,7 +392,7 @@ void defineFunc() { "wish to avoid including the time taken to compile a pipeline, " "then you can call this ahead of time. Returns the raw function " "pointer to the compiled pipeline.") - .def("compile_jit", &func_compile_jit0, p::arg("self")); + .def("compile_jit", &func_compile_jit0, p::arg("self")); func_class.def("debug_to_file", &Func::debug_to_file, p::args("self", "filename"), "When this function is compiled, include code that dumps its values " @@ -421,7 +412,6 @@ void defineFunc() { "Write out the loop nests specified by the schedule for this " "Function. Helpful for understanding what a schedule is doing."); - func_class.def("name", &Func::name, p::arg("self"), p::return_value_policy(), "The name of this function, either given during construction, or automatically generated."); @@ -465,10 +455,10 @@ void defineFunc() { "Get the reduction variables for an update definition, if there is one."); func_class - .def("has_update_definition", &Func::has_update_definition, p::arg("self"), - "Does this function have at least one update definition?") - .def("num_update_definitions", &Func::num_update_definitions, p::arg("self"), - "How many update definitions does this function have?"); + .def("has_update_definition", &Func::has_update_definition, p::arg("self"), + "Does this function have at least one update definition?") + .def("num_update_definitions", &Func::num_update_definitions, p::arg("self"), + "How many update definitions does this function have?"); func_class.def("is_extern", &Func::is_extern, p::arg("self"), "Is this function an external stage? That is, was it defined " @@ -497,7 +487,6 @@ void defineFunc() { func_class.def("dimensions", &Func::dimensions, p::arg("self"), "The dimensionality (number of arguments) of this function. Zero if the function is not yet defined."); - func_class.def("__getitem__", &func_getitem_operator, "If received a tuple of Vars\n\n" "Construct either the left-hand-side of a definition, or a call " @@ -525,20 +514,20 @@ void defineFunc() { "factor-1. The inner and outer subdimensions can then be dealt " "with using the other scheduling calls. It's ok to reuse the old " "variable name as either the inner or outer variable.") - .def("fuse", &Func::fuse, p::args("self", "inner", "outer", "fused"), - p::return_internal_reference<1>(), - "Join two dimensions into a single fused dimenion. The fused " - "dimension covers the product of the extents of the inner and " - "outer dimensions given.") - .def("serial", &Func::serial, p::args("self","var"), - p::return_internal_reference<1>(), - "Mark a dimension to be traversed serially. 
This is the default."); + .def("fuse", &Func::fuse, p::args("self", "inner", "outer", "fused"), + p::return_internal_reference<1>(), + "Join two dimensions into a single fused dimenion. The fused " + "dimension covers the product of the extents of the inner and " + "outer dimensions given.") + .def("serial", &Func::serial, p::args("self", "var"), + p::return_internal_reference<1>(), + "Mark a dimension to be traversed serially. This is the default."); func_class.def("parallel", &func_parallel0, p::args("self", "var"), p::return_internal_reference<1>(), "Mark a dimension (Var instance) to be traversed in parallel.") - .def("parallel", &func_parallel1, p::args("self", "var", "factor"), - p::return_internal_reference<1>()); + .def("parallel", &func_parallel1, p::args("self", "var", "factor"), + p::return_internal_reference<1>()); func_class.def("vectorize", &func_vectorize1, p::args("self", "var", "factor"), p::return_internal_reference<1>(), @@ -547,8 +536,8 @@ void defineFunc() { "size. The variable to be vectorized should be the innermost " "one. After this call, var refers to the outer dimension of the " "split.") - .def("vectorize", &func_vectorize0, p::args("self", "var"), - p::return_internal_reference<1>()); + .def("vectorize", &func_vectorize0, p::args("self", "var"), + p::return_internal_reference<1>()); func_class.def("unroll", &func_unroll1, p::args("self", "var", "factor"), p::return_internal_reference<1>(), @@ -556,10 +545,10 @@ void defineFunc() { "dimension. This is how you unroll a loop of unknown size by " "some constant factor. After this call, var refers to the outer " "dimension of the split.") - .def("unroll", &func_unroll0, p::args("self", "var"), - p::return_internal_reference<1>()); + .def("unroll", &func_unroll0, p::args("self", "var"), + p::return_internal_reference<1>()); - func_class.def("bound", &Func::bound, p::args("self", "var", "min", "extent"), + func_class.def("bound", &Func::bound, p::args("self", "var", "min", "extent"), p::return_internal_reference<1>(), "Statically declare that the range over which a function should " "be evaluated is given by the second and third arguments. This " @@ -570,13 +559,13 @@ void defineFunc() { "more of this function than the bounds you have stated, a " "runtime error will occur when you try to run your pipeline. "); - func_class.def("tile", &func_tile0, p::args("self", "x", "y", "xo", "yo", "xi", "yi", "xfactor", "yfactor"), + func_class.def("tile", &func_tile0, p::args("self", "x", "y", "xo", "yo", "xi", "yi", "xfactor", "yfactor"), p::return_internal_reference<1>(), "Split two dimensions at once by the given factors, and then " "reorder the resulting dimensions to be xi, yi, xo, yo from " "innermost outwards. 
This gives a tiled traversal."); - func_class.def("tile", &func_tile1, p::args("self", "x", "y", "xi", "yi", "xfactor", "yfactor"), + func_class.def("tile", &func_tile1, p::args("self", "x", "y", "xi", "yi", "xfactor", "yfactor"), p::return_internal_reference<1>(), "A shorter form of tile, which reuses the old variable names as the new outer dimensions"); @@ -584,56 +573,55 @@ void defineFunc() { p::return_internal_reference<1>(), "Reorder variables to have the given nesting order, " "from innermost out") - .def("reorder", &func_reorder0, p::args("self", "vars"), - p::return_internal_reference<1>(), - "Reorder variables to have the given nesting order, " - "from innermost out") - .def("reorder", &func_reorder1, (p::arg("self"), p::arg("v0"), p::arg("v1")=p::object(), - p::arg("v2")=p::object(), p::arg("v3")=p::object(), - p::arg("v4")=p::object(), p::arg("v5")=p::object()), - p::return_internal_reference<1>(), - "Reorder variables to have the given nesting order, " - "from innermost out"); + .def("reorder", &func_reorder0, p::args("self", "vars"), + p::return_internal_reference<1>(), + "Reorder variables to have the given nesting order, " + "from innermost out") + .def("reorder", &func_reorder1, (p::arg("self"), p::arg("v0"), p::arg("v1") = p::object(), + p::arg("v2") = p::object(), p::arg("v3") = p::object(), + p::arg("v4") = p::object(), p::arg("v5") = p::object()), + p::return_internal_reference<1>(), + "Reorder variables to have the given nesting order, " + "from innermost out"); func_class.def("rename", &Func::rename, p::args("self", "old_name", "new_name"), p::return_internal_reference<1>(), "Rename a dimension. Equivalent to split with a inner size of one."); - const std::string reorder_storage_doc = \ - "Specify how the storage for the function is laid out. These " - "calls let you specify the nesting order of the dimensions. For " - "example, foo.reorder_storage(y, x) tells Halide to use " - "column-major storage for any realizations of foo, without " - "changing how you refer to foo in the code. You may want to do " - "this if you intend to vectorize across y. When representing " - "color images, foo.reorder_storage(c, x, y) specifies packed " - "storage (red, green, and blue values adjacent in memory), and " - "foo.reorder_storage(x, y, c) specifies planar storage (entire " - "red, green, and blue images one after the other in memory).\n\n" - "If you leave out some dimensions, those remain in the same " - "positions in the nesting order while the specified variables " - "are reordered around them."; + const std::string reorder_storage_doc = + "Specify how the storage for the function is laid out. These " + "calls let you specify the nesting order of the dimensions. For " + "example, foo.reorder_storage(y, x) tells Halide to use " + "column-major storage for any realizations of foo, without " + "changing how you refer to foo in the code. You may want to do " + "this if you intend to vectorize across y. 
When representing " + "color images, foo.reorder_storage(c, x, y) specifies packed " + "storage (red, green, and blue values adjacent in memory), and " + "foo.reorder_storage(x, y, c) specifies planar storage (entire " + "red, green, and blue images one after the other in memory).\n\n" + "If you leave out some dimensions, those remain in the same " + "positions in the nesting order while the specified variables " + "are reordered around them."; func_class.def("reorder_storage", &func_reorder_storage0, p::args("self", "dims"), p::return_internal_reference<1>(), reorder_storage_doc.c_str()) - .def("reorder_storage", &func_reorder_storage0, p::args("self", "dims"), - p::return_internal_reference<1>(), reorder_storage_doc.c_str()) - .def("reorder_storage", &func_reorder_storage1, (p::arg("self"), p::arg("v0"), p::arg("v1"), - p::arg("v2")=p::object(), p::arg("v3")=p::object(), - p::arg("v4")=p::object(), p::arg("v5")=p::object()), - p::return_internal_reference<1>(), reorder_storage_doc.c_str()); + .def("reorder_storage", &func_reorder_storage0, p::args("self", "dims"), + p::return_internal_reference<1>(), reorder_storage_doc.c_str()) + .def("reorder_storage", &func_reorder_storage1, (p::arg("self"), p::arg("v0"), p::arg("v1"), + p::arg("v2") = p::object(), p::arg("v3") = p::object(), + p::arg("v4") = p::object(), p::arg("v5") = p::object()), + p::return_internal_reference<1>(), reorder_storage_doc.c_str()); func_class.def("compute_at", &func_compute_at0, p::args("self", "f", "var"), p::return_internal_reference<1>(), "Compute this function as needed for each unique value of the " "given var (can be a Var or an RVar) for the given calling function f.") - .def("compute_at", &func_compute_at1, p::args("self", "f", "var"), - p::return_internal_reference<1>()); + .def("compute_at", &func_compute_at1, p::args("self", "f", "var"), + p::return_internal_reference<1>()); func_class.def("compute_root", &Func::compute_root, p::arg("self"), p::return_internal_reference<1>(), "Compute all of this function once ahead of time."); - func_class.def("store_at", &func_store_at0, p::args("self", "f", "var"), p::return_internal_reference<1>(), "Allocate storage for this function within f's loop over " @@ -641,8 +629,8 @@ void defineFunc() { "separate the loop level at which storage occurs from the loop " "level at which computation occurs to trade off between locality " "and redundant work.") - .def("store_at", &func_store_at1, p::args("self", "f", "var"), - p::return_internal_reference<1>()); + .def("store_at", &func_store_at1, p::args("self", "f", "var"), + p::return_internal_reference<1>()); func_class.def("store_root", &Func::store_root, p::arg("self"), p::return_internal_reference<1>(), @@ -655,7 +643,7 @@ void defineFunc() { "a reduction, that means it gets computed as close to the " "innermost loop as possible."); - func_class.def("update", &Func::update, (p::arg("self"), p::arg("idx")=0), + func_class.def("update", &Func::update, (p::arg("self"), p::arg("idx") = 0), "Get a handle on the update step of a reduction for the " "purposes of scheduling it. Only the pure dimensions of the " "update step can be meaningfully manipulated (see RDom)."); @@ -663,17 +651,17 @@ void defineFunc() { func_class.def("function", &Func::function, p::arg("self"), "Get a handle on the internal halide function that this Func represents. 
" "Useful if you want to do introspection on Halide functions.") - .def("trace_loads", &Func::trace_loads, p::arg("self"), - p::return_internal_reference<1>(), - "Trace all loads from this Func by emitting calls to " - "halide_trace. If the Func is inlined, this has no effect.") - .def("trace_stores", &Func::trace_stores, p::arg("self"), - p::return_internal_reference<1>(), - "Trace all stores to the buffer backing this Func by emitting " - "calls to halide_trace. If the Func is inlined, this call has no effect.") - .def("trace_realizations", &Func::trace_realizations, p::arg("self"), - p::return_internal_reference<1>(), - "Trace all realizations of this Func by emitting calls to halide_trace."); + .def("trace_loads", &Func::trace_loads, p::arg("self"), + p::return_internal_reference<1>(), + "Trace all loads from this Func by emitting calls to " + "halide_trace. If the Func is inlined, this has no effect.") + .def("trace_stores", &Func::trace_stores, p::arg("self"), + p::return_internal_reference<1>(), + "Trace all stores to the buffer backing this Func by emitting " + "calls to halide_trace. If the Func is inlined, this call has no effect.") + .def("trace_realizations", &Func::trace_realizations, p::arg("self"), + p::return_internal_reference<1>(), + "Trace all realizations of this Func by emitting calls to halide_trace."); func_class.def("specialize", &Func::specialize, p::args("self", "condition"), "Specialize a Func. This creates a special-case version of the " diff --git a/python_bindings/python/Func.h b/python_bindings/python/Func.h index f9063ddccd25..2c207f482375 100644 --- a/python_bindings/python/Func.h +++ b/python_bindings/python/Func.h @@ -5,12 +5,12 @@ #include #include -#include #include +#include +#include "../../src/Expr.h" #include "../../src/Func.h" #include "../../src/Var.h" -#include "../../src/Expr.h" void defineFunc(); @@ -21,86 +21,69 @@ namespace func_and_stage_implementation_details { namespace hh = Halide; namespace bp = boost::python; -template -FuncOrStage &func_parallel0(FuncOrStage &that, hh::VarOrRVar var) -{ +template +FuncOrStage &func_parallel0(FuncOrStage &that, hh::VarOrRVar var) { return that.parallel(var); } -template -FuncOrStage &func_parallel1(FuncOrStage &that, hh::VarOrRVar var, int factor) -{ +template +FuncOrStage &func_parallel1(FuncOrStage &that, hh::VarOrRVar var, int factor) { return that.parallel(var, factor); } -template -FuncOrStage &func_split(FuncOrStage &that, hh::VarOrRVar var, hh::VarOrRVar outer, hh::VarOrRVar inner, int factor) -{ +template +FuncOrStage &func_split(FuncOrStage &that, hh::VarOrRVar var, hh::VarOrRVar outer, hh::VarOrRVar inner, int factor) { return that.split(var, outer, inner, factor); } - -template -FuncOrStage &func_vectorize0(FuncOrStage &that, hh::VarOrRVar var) -{ +template +FuncOrStage &func_vectorize0(FuncOrStage &that, hh::VarOrRVar var) { return that.vectorize(var); } -template -FuncOrStage &func_vectorize1(FuncOrStage &that, hh::VarOrRVar var, int factor) -{ +template +FuncOrStage &func_vectorize1(FuncOrStage &that, hh::VarOrRVar var, int factor) { return that.vectorize(var, factor); } - -template -FuncOrStage &func_unroll0(FuncOrStage &that, hh::VarOrRVar var) -{ +template +FuncOrStage &func_unroll0(FuncOrStage &that, hh::VarOrRVar var) { return that.unroll(var); } -template -FuncOrStage &func_unroll1(FuncOrStage &that, hh::VarOrRVar var, int factor) -{ +template +FuncOrStage &func_unroll1(FuncOrStage &that, hh::VarOrRVar var, int factor) { return that.unroll(var, factor); } -template +template 
FuncOrStage &func_tile0(FuncOrStage &that, hh::VarOrRVar x, hh::VarOrRVar y, hh::VarOrRVar xo, hh::VarOrRVar yo, hh::VarOrRVar xi, hh::VarOrRVar yi, - hh::Expr xfactor, hh::Expr yfactor) -{ + hh::Expr xfactor, hh::Expr yfactor) { return that.tile(x, y, xo, yo, xi, yi, xfactor, yfactor); } -template +template FuncOrStage &func_tile1(FuncOrStage &that, hh::VarOrRVar x, hh::VarOrRVar y, hh::VarOrRVar xi, hh::VarOrRVar yi, - hh::Expr xfactor, hh::Expr yfactor) -{ + hh::Expr xfactor, hh::Expr yfactor) { return that.tile(x, y, xi, yi, xfactor, yfactor); } -template -FuncOrStage &func_reorder0(FuncOrStage &that, PythonIterable args_passed) -{ +template +FuncOrStage &func_reorder0(FuncOrStage &that, PythonIterable args_passed) { std::vector var_or_rvar_args; const size_t args_len = bp::len(args_passed); - for(size_t i=0; i < args_len; i+=1) - { + for (size_t i = 0; i < args_len; i += 1) { bp::object o = args_passed[i]; bp::extract var_or_rvar_extract(o); - if(var_or_rvar_extract.check()) - { + if (var_or_rvar_extract.check()) { var_or_rvar_args.push_back(var_or_rvar_extract()); - } - else - { - for(size_t j=0; j < args_len; j+=1) - { + } else { + for (size_t j = 0; j < args_len; j += 1) { bp::object o = args_passed[j]; const std::string o_str = bp::extract(bp::str(o)); printf("Func::reorder args_passed[%lu] == %s\n", j, o_str.c_str()); @@ -112,15 +95,12 @@ FuncOrStage &func_reorder0(FuncOrStage &that, PythonIterable args_passed) return that.reorder(var_or_rvar_args); } -template +template FuncOrStage &func_reorder1(FuncOrStage &that, bp::object v0, - bp::object v1, bp::object v2, bp::object v3, bp::object v4, bp::object v5) -{ + bp::object v1, bp::object v2, bp::object v3, bp::object v4, bp::object v5) { bp::list args_list; - for(const bp::object &v : {v0, v1, v2, v3, v4, v5}) - { - if(not v.is_none()) - { + for (const bp::object &v : { v0, v1, v2, v3, v4, v5 }) { + if (not v.is_none()) { args_list.append(v); } } @@ -128,25 +108,19 @@ FuncOrStage &func_reorder1(FuncOrStage &that, bp::object v0, return func_reorder0(that, args_list); } -template -FuncOrStage &func_reorder_storage0(FuncOrStage &that, PythonIterable args_passed) -{ +template +FuncOrStage &func_reorder_storage0(FuncOrStage &that, PythonIterable args_passed) { std::vector var_args; const size_t args_len = bp::len(args_passed); - for(size_t i=0; i < args_len; i+=1) - { + for (size_t i = 0; i < args_len; i += 1) { bp::object o = args_passed[i]; bp::extract var_extract(o); - if(var_extract.check()) - { + if (var_extract.check()) { var_args.push_back(var_extract()); - } - else - { - for(size_t j=0; j < args_len; j+=1) - { + } else { + for (size_t j = 0; j < args_len; j += 1) { bp::object o = args_passed[j]; const std::string o_str = bp::extract(bp::str(o)); printf("Func::reorder_storage args_passed[%lu] == %s\n", j, o_str.c_str()); @@ -158,16 +132,13 @@ FuncOrStage &func_reorder_storage0(FuncOrStage &that, PythonIterable args_passed return that.reorder_storage(var_args); } -template +template FuncOrStage &func_reorder_storage1(FuncOrStage &that, bp::object v0, bp::object v1, bp::object v2, - bp::object v3, bp::object v4, bp::object v5) -{ + bp::object v3, bp::object v4, bp::object v5) { bp::list args_list; - for(const bp::object &v : {v0, v1, v2, v3, v4, v5}) - { - if(not v.is_none()) - { + for (const bp::object &v : { v0, v1, v2, v3, v4, v5 }) { + if (not v.is_none()) { args_list.append(v); } } @@ -175,7 +146,6 @@ FuncOrStage &func_reorder_storage1(FuncOrStage &that, bp::object v0, return func_reorder_storage0(that, args_list); } +} // 
end of namespace func_and_stage_implementation_details -} // end of namespace func_and_stage_implementation_details - -#endif // FUNC_H +#endif // FUNC_H diff --git a/python_bindings/python/Func_Ref.cpp b/python_bindings/python/Func_Ref.cpp index ff5b49558a85..433fdf282a87 100644 --- a/python_bindings/python/Func_Ref.cpp +++ b/python_bindings/python/Func_Ref.cpp @@ -1,22 +1,20 @@ #include "Func_Ref.h" // to avoid compiler confusion, python.hpp must be include before Halide headers -#include #include "add_operators.h" +#include #include "../../src/Func.h" #include "../../src/Tuple.h" -#include #include +#include namespace h = Halide; namespace p = boost::python; - -template -A& iadd_func(A a, B b) -{ +template +A &iadd_func(A a, B b) { a += b; // for FuncRef this will create a stage, // but in python the return object replaces the caller, @@ -24,38 +22,34 @@ A& iadd_func(A a, B b) return a; } -template -A& isub_func(A a, B b) -{ +template +A &isub_func(A a, B b) { a -= b; return a; } -template -A& imul_func(A a, B b) -{ +template +A &imul_func(A a, B b) { a *= b; return a; } -template -A& idiv_func(A a, B b) -{ +template +A &idiv_func(A a, B b) { a /= b; return a; } -void defineFuncTupleElementRef() -{ +void defineFuncTupleElementRef() { using Halide::FuncTupleElementRef; auto func_tuple_element_ref_class = - p::class_("FuncTupleElementRef", - "A fragment of front-end syntax of the form f(x, y, z)[index], where x, " - "y, z are Vars or Exprs. It could be the left-hand side of an update " - "definition, or it could be a call to a function. We don't know " - "until we see how this object gets used.", - p::no_init) + p::class_("FuncTupleElementRef", + "A fragment of front-end syntax of the form f(x, y, z)[index], where x, " + "y, z are Vars or Exprs. It could be the left-hand side of an update " + "definition, or it could be a call to a function. We don't know " + "until we see how this object gets used.", + p::no_init) //FuncTupleElementRef(const FuncRef &ref, const std::vector& args, int idx); // .def("__??__", FuncTupleElementRef::operator=(Expr); @@ -96,7 +90,6 @@ void defineFuncTupleElementRef() //Stage operator=(const FuncRef &); //FIXME implement __setitem__ - // .def("to_Expr", &FuncTupleElementRef::operator Expr, // "Use this as a call to Tuple component 'idx' of a Func, and not the " // "left-hand-side of a definition.") @@ -104,8 +97,7 @@ void defineFuncTupleElementRef() .def("function", &FuncTupleElementRef::function, "What function is this calling?") .def("index", &FuncTupleElementRef::index, - "Return index to the function outputs.") - ; + "Return index to the function outputs."); typedef decltype(func_tuple_element_ref_class) func_tuple_element_ref_class_t; typedef func_tuple_element_ref_class_t fterc_t; @@ -121,18 +113,16 @@ void defineFuncTupleElementRef() return; } - -void defineFuncRefExprClass() -{ +void defineFuncRefExprClass() { using Halide::FuncRef; auto func_ref_expr_class = - p::class_("FuncRef", - "A fragment of front-end syntax of the form f(x, y, z), where x, y, " - "z are Vars or Exprs. If could be the left hand side of a definition or an " - "update definition, or it could be a call to a function. We don't know " - "until we see how this object gets used. ", - p::no_init) + p::class_("FuncRef", + "A fragment of front-end syntax of the form f(x, y, z), where x, y, " + "z are Vars or Exprs. If could be the left hand side of a definition or an " + "update definition, or it could be a call to a function. We don't know " + "until we see how this object gets used. 
", + p::no_init) // FuncRef(Internal::Function, const std::vector &, // int placeholder_pos = -1); // FuncRef(Internal::Function, const std::vector &, @@ -178,7 +168,7 @@ void defineFuncRefExprClass() // "Use this as a call to the function, and not the left-hand-side" // "of a definition. Only works for single-output Funcs.") - .def("__getitem__", &FuncRef::operator [], + .def("__getitem__", &FuncRef::operator[], "When a FuncRef refers to a function that provides multiple " "outputs, you can access each output as an Expr using " "operator[].") @@ -188,8 +178,7 @@ void defineFuncRefExprClass() "How many outputs does the function this refers to produce.") .def("function", &FuncRef::function, - "What function is this calling?") - ; + "What function is this calling?"); typedef decltype(func_ref_expr_class) func_ref_expr_class_t; typedef func_ref_expr_class_t frec_t; @@ -205,8 +194,7 @@ void defineFuncRefExprClass() return; } -void defineFuncRef() -{ +void defineFuncRef() { // only defined so that boost::python knows about these class, // not (yet) meant to be created or manipulated by the user p::class_("InternalFunction", p::no_init); diff --git a/python_bindings/python/Func_Ref.h b/python_bindings/python/Func_Ref.h index f828746879cd..7bf3c41da7b8 100644 --- a/python_bindings/python/Func_Ref.h +++ b/python_bindings/python/Func_Ref.h @@ -3,4 +3,4 @@ void defineFuncRef(); -#endif // FUNC_REF_H +#endif // FUNC_REF_H diff --git a/python_bindings/python/Func_Stage.cpp b/python_bindings/python/Func_Stage.cpp index 70127e20f815..fdd7f8b06bba 100644 --- a/python_bindings/python/Func_Stage.cpp +++ b/python_bindings/python/Func_Stage.cpp @@ -14,16 +14,14 @@ namespace h = Halide; namespace p = boost::python; - -void defineStage() -{ +void defineStage() { using Halide::Stage; using namespace func_and_stage_implementation_details; // only defined so that boost::python knows about these classes, // not (yet) meant to be created or manipulated by the user auto stage_class = - p::class_("Stage", p::no_init) + p::class_("Stage", p::no_init) // Stage(Internal::Schedule s, const std::string &n) : .def("dump_argument_list", &Stage::dump_argument_list, p::arg("self"), @@ -34,35 +32,33 @@ void defineStage() p::return_value_policy(), "Return the name of this stage, e.g. \"f.update(2)\"") .def("allow_race_conditions", &Stage::allow_race_conditions, p::arg("self"), - p::return_internal_reference<1>()) - ; - + p::return_internal_reference<1>()); // Scheduling calls that control how the domain of this stage is traversed. // "See the documentation for Func for the meanings." stage_class - .def("split", &func_split, p::args("self", "old", "outer", "inner", "factor"), - p::return_internal_reference<1>(), - "Split a dimension into inner and outer subdimensions with the " - "given names, where the inner dimension iterates from 0 to " - "factor-1. The inner and outer subdimensions can then be dealt " - "with using the other scheduling calls. It's ok to reuse the old " - "variable name as either the inner or outer variable.") - .def("fuse", &Stage::fuse, p::args("self", "inner", "outer", "fused"), - p::return_internal_reference<1>(), - "Join two dimensions into a single fused dimenion. The fused " - "dimension covers the product of the extents of the inner and " - "outer dimensions given.") - .def("serial", &Stage::serial, p::args("self","var"), - p::return_internal_reference<1>(), - "Mark a dimension to be traversed serially. 
This is the default."); + .def("split", &func_split, p::args("self", "old", "outer", "inner", "factor"), + p::return_internal_reference<1>(), + "Split a dimension into inner and outer subdimensions with the " + "given names, where the inner dimension iterates from 0 to " + "factor-1. The inner and outer subdimensions can then be dealt " + "with using the other scheduling calls. It's ok to reuse the old " + "variable name as either the inner or outer variable.") + .def("fuse", &Stage::fuse, p::args("self", "inner", "outer", "fused"), + p::return_internal_reference<1>(), + "Join two dimensions into a single fused dimenion. The fused " + "dimension covers the product of the extents of the inner and " + "outer dimensions given.") + .def("serial", &Stage::serial, p::args("self", "var"), + p::return_internal_reference<1>(), + "Mark a dimension to be traversed serially. This is the default."); stage_class.def("parallel", &func_parallel0, p::args("self", "var"), p::return_internal_reference<1>(), "Mark a dimension (Var instance) to be traversed in parallel.") - .def("parallel", &func_parallel1, p::args("self", "var", "factor"), - p::return_internal_reference<1>()); + .def("parallel", &func_parallel1, p::args("self", "var", "factor"), + p::return_internal_reference<1>()); stage_class.def("vectorize", &func_vectorize1, p::args("self", "var", "factor"), p::return_internal_reference<1>(), @@ -71,8 +67,8 @@ void defineStage() "size. The variable to be vectorized should be the innermost " "one. After this call, var refers to the outer dimension of the " "split.") - .def("vectorize", &func_vectorize0, p::args("self", "var"), - p::return_internal_reference<1>()); + .def("vectorize", &func_vectorize0, p::args("self", "var"), + p::return_internal_reference<1>()); stage_class.def("unroll", &func_unroll1, p::args("self", "var", "factor"), p::return_internal_reference<1>(), @@ -80,43 +76,42 @@ void defineStage() "dimension. This is how you unroll a loop of unknown size by " "some constant factor. After this call, var refers to the outer " "dimension of the split.") - .def("unroll", &func_unroll0, p::args("self", "var"), - p::return_internal_reference<1>()); + .def("unroll", &func_unroll0, p::args("self", "var"), + p::return_internal_reference<1>()); - stage_class.def("tile", &func_tile0, p::args("self", "x", "y", "xo", "yo", "xi", "yi", "xfactor", "yfactor"), + stage_class.def("tile", &func_tile0, p::args("self", "x", "y", "xo", "yo", "xi", "yi", "xfactor", "yfactor"), p::return_internal_reference<1>(), "Split two dimensions at once by the given factors, and then " "reorder the resulting dimensions to be xi, yi, xo, yo from " "innermost outwards. 
This gives a tiled traversal.") - .def("tile", &func_tile1, p::args("self", "x", "y", "xi", "yi", "xfactor", "yfactor"), - p::return_internal_reference<1>(), - "A shorter form of tile, which reuses the old variable names as the new outer dimensions"); - + .def("tile", &func_tile1, p::args("self", "x", "y", "xi", "yi", "xfactor", "yfactor"), + p::return_internal_reference<1>(), + "A shorter form of tile, which reuses the old variable names as the new outer dimensions"); stage_class.def("reorder", &func_reorder0, p::args("self", "vars"), p::return_internal_reference<1>(), "Reorder variables to have the given nesting order, " "from innermost out") - .def("reorder", &func_reorder0, p::args("self", "vars"), - p::return_internal_reference<1>(), - "Reorder variables to have the given nesting order, " - "from innermost out") - .def("reorder", &func_reorder1, (p::arg("self"), p::arg("v0"), p::arg("v1")=p::object(), - p::arg("v2")=p::object(), p::arg("v3")=p::object(), - p::arg("v4")=p::object(), p::arg("v5")=p::object()), - p::return_internal_reference<1>(), - "Reorder variables to have the given nesting order, " - "from innermost out"); + .def("reorder", &func_reorder0, p::args("self", "vars"), + p::return_internal_reference<1>(), + "Reorder variables to have the given nesting order, " + "from innermost out") + .def("reorder", &func_reorder1, (p::arg("self"), p::arg("v0"), p::arg("v1") = p::object(), + p::arg("v2") = p::object(), p::arg("v3") = p::object(), + p::arg("v4") = p::object(), p::arg("v5") = p::object()), + p::return_internal_reference<1>(), + "Reorder variables to have the given nesting order, " + "from innermost out"); stage_class.def("rename", &Stage::rename, p::args("self", "old_name", "new_name"), p::return_internal_reference<1>(), "Rename a dimension. Equivalent to split with a inner size of one."); stage_class.def("specialize", &Stage::specialize, p::args("self", "condition"), - "Specialize a Func (Stage). This creates a special-case version of the " - "Func where the given condition is true. The most effective " - "conditions are those of the form param == value, and boolean " - "Params. See C++ documentation for more details."); + "Specialize a Func (Stage). This creates a special-case version of the " + "Func where the given condition is true. The most effective " + "conditions are those of the form param == value, and boolean " + "Params. See C++ documentation for more details."); defineFuncOrStageGpuMethods(stage_class); diff --git a/python_bindings/python/Func_Stage.h b/python_bindings/python/Func_Stage.h index e5541ccc5178..97cdaf24f2cd 100644 --- a/python_bindings/python/Func_Stage.h +++ b/python_bindings/python/Func_Stage.h @@ -3,4 +3,4 @@ void defineStage(); -#endif // FUNC_STAGE_H +#endif // FUNC_STAGE_H diff --git a/python_bindings/python/Func_VarOrRVar.cpp b/python_bindings/python/Func_VarOrRVar.cpp index ce4cbbdc1f39..e06af23aa704 100644 --- a/python_bindings/python/Func_VarOrRVar.cpp +++ b/python_bindings/python/Func_VarOrRVar.cpp @@ -6,29 +6,25 @@ #include "../../src/Func.h" -#include #include +#include -void defineVarOrRVar() -{ +void defineVarOrRVar() { using Halide::VarOrRVar; namespace h = Halide; namespace p = boost::python; - p::class_("VarOrRVar", "A class that can represent Vars or RVars. 
" "Used for reorder calls which can accept a mix of either.", p::init(p::args("self", "n", "r"))) - .def(p::init(p::args("self", "v"))) - .def(p::init(p::args("self", "r"))) - .def(p::init(p::args("self", "r"))) - .def("name", &VarOrRVar::name, p::arg("self"), p::return_value_policy()) - .def_readonly("var", &VarOrRVar::var) - .def_readonly("rvar", &VarOrRVar::rvar) - .def_readonly("is_rvar", &VarOrRVar::is_rvar) - ; - + .def(p::init(p::args("self", "v"))) + .def(p::init(p::args("self", "r"))) + .def(p::init(p::args("self", "r"))) + .def("name", &VarOrRVar::name, p::arg("self"), p::return_value_policy()) + .def_readonly("var", &VarOrRVar::var) + .def_readonly("rvar", &VarOrRVar::rvar) + .def_readonly("is_rvar", &VarOrRVar::is_rvar); p::implicitly_convertible(); p::implicitly_convertible(); diff --git a/python_bindings/python/Func_VarOrRVar.h b/python_bindings/python/Func_VarOrRVar.h index c787957cacdb..3124d064c9eb 100644 --- a/python_bindings/python/Func_VarOrRVar.h +++ b/python_bindings/python/Func_VarOrRVar.h @@ -3,5 +3,4 @@ void defineVarOrRVar(); - -#endif // FUNC_VARORRVAR_H +#endif // FUNC_VARORRVAR_H diff --git a/python_bindings/python/Func_gpu.cpp b/python_bindings/python/Func_gpu.cpp index 300ae76dd99c..fc6e5ffb7568 100644 --- a/python_bindings/python/Func_gpu.cpp +++ b/python_bindings/python/Func_gpu.cpp @@ -5,12 +5,10 @@ #include "../../src/Func.h" - namespace h = Halide; namespace p = boost::python; -void defineFuncGpuMethods(p::class_ &func_class) -{ +void defineFuncGpuMethods(p::class_ &func_class) { using namespace func_and_stage_implementation_details; // defineFuncOrStageGpuMethods is defined in the header file diff --git a/python_bindings/python/Func_gpu.h b/python_bindings/python/Func_gpu.h index c127b6a49727..cf9acb6c9b51 100644 --- a/python_bindings/python/Func_gpu.h +++ b/python_bindings/python/Func_gpu.h @@ -9,7 +9,6 @@ /// Define all gpu related methods void defineFuncGpuMethods(boost::python::class_ &func_class); - namespace func_and_stage_implementation_details { // These are methods shared with Stage @@ -17,148 +16,131 @@ namespace func_and_stage_implementation_details { namespace hh = Halide; namespace bp = boost::python; -template -FuncOrStage &func_gpu_threads0(FuncOrStage &that, hh::VarOrRVar thread_x, hh::DeviceAPI device_api) -{ +template +FuncOrStage &func_gpu_threads0(FuncOrStage &that, hh::VarOrRVar thread_x, hh::DeviceAPI device_api) { return that.gpu_threads(thread_x, device_api); } -template -FuncOrStage &func_gpu_threads1(FuncOrStage &that, hh::VarOrRVar thread_x, hh::VarOrRVar thread_y, hh::DeviceAPI device_api) -{ +template +FuncOrStage &func_gpu_threads1(FuncOrStage &that, hh::VarOrRVar thread_x, hh::VarOrRVar thread_y, hh::DeviceAPI device_api) { return that.gpu_threads(thread_x, thread_y, device_api); } -template -FuncOrStage &func_gpu_threads2(FuncOrStage &that, hh::VarOrRVar thread_x, hh::VarOrRVar thread_y, hh::VarOrRVar thread_z, hh::DeviceAPI device_api) -{ +template +FuncOrStage &func_gpu_threads2(FuncOrStage &that, hh::VarOrRVar thread_x, hh::VarOrRVar thread_y, hh::VarOrRVar thread_z, hh::DeviceAPI device_api) { return that.gpu_threads(thread_x, thread_y, thread_z, device_api); } - -template -FuncOrStage &func_gpu_blocks0(FuncOrStage &that, hh::VarOrRVar block_x, hh::DeviceAPI device_api) -{ +template +FuncOrStage &func_gpu_blocks0(FuncOrStage &that, hh::VarOrRVar block_x, hh::DeviceAPI device_api) { return that.gpu_blocks(block_x, device_api); } -template -FuncOrStage &func_gpu_blocks1(FuncOrStage &that, hh::VarOrRVar block_x, 
hh::VarOrRVar block_y, hh::DeviceAPI device_api) -{ +template +FuncOrStage &func_gpu_blocks1(FuncOrStage &that, hh::VarOrRVar block_x, hh::VarOrRVar block_y, hh::DeviceAPI device_api) { return that.gpu_blocks(block_x, block_y, device_api); } -template -FuncOrStage &func_gpu_blocks2(FuncOrStage &that, hh::VarOrRVar block_x, hh::VarOrRVar block_y, hh::VarOrRVar block_z, hh::DeviceAPI device_api) -{ +template +FuncOrStage &func_gpu_blocks2(FuncOrStage &that, hh::VarOrRVar block_x, hh::VarOrRVar block_y, hh::VarOrRVar block_z, hh::DeviceAPI device_api) { return that.gpu_blocks(block_x, block_y, block_z, device_api); } - -template +template FuncOrStage &func_gpu0(FuncOrStage &that, hh::VarOrRVar block_x, hh::VarOrRVar thread_x, - hh::DeviceAPI device_api) -{ + hh::DeviceAPI device_api) { return that.gpu(block_x, thread_x, device_api); } -template +template FuncOrStage &func_gpu1(FuncOrStage &that, hh::VarOrRVar block_x, hh::VarOrRVar block_y, hh::VarOrRVar thread_x, hh::VarOrRVar thread_y, - hh::DeviceAPI device_api) -{ + hh::DeviceAPI device_api) { return that.gpu(block_x, block_y, thread_x, thread_y, device_api); } -template +template FuncOrStage &func_gpu2(FuncOrStage &that, hh::VarOrRVar block_x, hh::VarOrRVar block_y, hh::VarOrRVar block_z, - hh::VarOrRVar thread_x, hh::VarOrRVar thread_y, hh::VarOrRVar thread_z, - hh::DeviceAPI device_api) -{ + hh::VarOrRVar thread_x, hh::VarOrRVar thread_y, hh::VarOrRVar thread_z, + hh::DeviceAPI device_api) { return that.gpu(block_x, block_y, block_z, thread_x, thread_y, thread_z, device_api); } - -template -FuncOrStage &func_gpu_tile0(FuncOrStage &that, hh::VarOrRVar x, int x_size, hh::DeviceAPI device_api) -{ +template +FuncOrStage &func_gpu_tile0(FuncOrStage &that, hh::VarOrRVar x, int x_size, hh::DeviceAPI device_api) { return that.gpu_tile(x, x_size, hh::TailStrategy::Auto, device_api); } -template +template FuncOrStage &func_gpu_tile1(FuncOrStage &that, hh::VarOrRVar x, hh::VarOrRVar y, int x_size, int y_size, - hh::DeviceAPI device_api) -{ + hh::DeviceAPI device_api) { return that.gpu_tile(x, y, x_size, y_size, hh::TailStrategy::Auto, device_api); } -template +template FuncOrStage &func_gpu_tile2(FuncOrStage &that, hh::VarOrRVar x, hh::VarOrRVar y, hh::VarOrRVar z, int x_size, int y_size, int z_size, - hh::DeviceAPI device_api) -{ + hh::DeviceAPI device_api) { return that.gpu_tile(x, y, z, x_size, y_size, z_size, hh::TailStrategy::Auto, device_api); } - /// Define all gpu related methods -template -void defineFuncOrStageGpuMethods(bp::class_ &func_or_stage_class) -{ +template +void defineFuncOrStageGpuMethods(bp::class_ &func_or_stage_class) { func_or_stage_class - .def("gpu_threads", &func_gpu_threads2, - (bp::arg("self"), - bp::arg("thread_x"), bp::arg("thread_y"), bp::arg("thread_z"), - bp::arg("device_api") = hh::DeviceAPI::Default_GPU), - bp::return_internal_reference<1>(), - "Tell Halide that the following dimensions correspond to GPU " - "thread indices. This is useful if you compute a producer " - "function within the block indices of a consumer function, and " - "want to control how that function's dimensions map to GPU " - "threads. 
If the selected target is not an appropriate GPU, this " - "just marks those dimensions as parallel.") - .def("gpu_threads", &func_gpu_threads1, - (bp::arg("self"), - bp::arg("thread_x"), bp::arg("thread_y"), - bp::arg("device_api") = hh::DeviceAPI::Default_GPU), - bp::return_internal_reference<1>()) - .def("gpu_threads", &func_gpu_threads0, - (bp::arg("self"), - bp::arg("thread_x"), - bp::arg("device_api") = hh::DeviceAPI::Default_GPU), - bp::return_internal_reference<1>()); + .def("gpu_threads", &func_gpu_threads2, + (bp::arg("self"), + bp::arg("thread_x"), bp::arg("thread_y"), bp::arg("thread_z"), + bp::arg("device_api") = hh::DeviceAPI::Default_GPU), + bp::return_internal_reference<1>(), + "Tell Halide that the following dimensions correspond to GPU " + "thread indices. This is useful if you compute a producer " + "function within the block indices of a consumer function, and " + "want to control how that function's dimensions map to GPU " + "threads. If the selected target is not an appropriate GPU, this " + "just marks those dimensions as parallel.") + .def("gpu_threads", &func_gpu_threads1, + (bp::arg("self"), + bp::arg("thread_x"), bp::arg("thread_y"), + bp::arg("device_api") = hh::DeviceAPI::Default_GPU), + bp::return_internal_reference<1>()) + .def("gpu_threads", &func_gpu_threads0, + (bp::arg("self"), + bp::arg("thread_x"), + bp::arg("device_api") = hh::DeviceAPI::Default_GPU), + bp::return_internal_reference<1>()); func_or_stage_class - .def("gpu_single_thread", &FuncOrStage::gpu_single_thread, - (bp::arg("self"), - bp::arg("device_api") = hh::DeviceAPI::Default_GPU), - bp::return_internal_reference<1>(), - "Tell Halide to run this stage using a single gpu thread and " - "block. This is not an efficient use of your GPU, but it can be " - "useful to avoid copy-back for intermediate update stages that " - "touch a very small part of your Func."); + .def("gpu_single_thread", &FuncOrStage::gpu_single_thread, + (bp::arg("self"), + bp::arg("device_api") = hh::DeviceAPI::Default_GPU), + bp::return_internal_reference<1>(), + "Tell Halide to run this stage using a single gpu thread and " + "block. This is not an efficient use of your GPU, but it can be " + "useful to avoid copy-back for intermediate update stages that " + "touch a very small part of your Func."); func_or_stage_class - .def("gpu_blocks", &func_gpu_blocks2, - (bp::arg("self"), - bp::arg("block_x"), bp::arg("block_y"), bp::arg("block_z"), - bp::arg("device_api") = hh::DeviceAPI::Default_GPU), - bp::return_internal_reference<1>(), - "Tell Halide that the following dimensions correspond to GPU " - "block indices. This is useful for scheduling stages that will " - "run serially within each GPU block. If the selected target is " - "not ptx, this just marks those dimensions as parallel.") - .def("gpu_blocks", &func_gpu_blocks1, - (bp::arg("self"), - bp::arg("block_x"), bp::arg("block_y"), - bp::arg("device_api") = hh::DeviceAPI::Default_GPU), - bp::return_internal_reference<1>()) - .def("gpu_blocks", &func_gpu_blocks0, - (bp::arg("self"), - bp::arg("block_x"), - bp::arg("device_api") = hh::DeviceAPI::Default_GPU), - bp::return_internal_reference<1>()); + .def("gpu_blocks", &func_gpu_blocks2, + (bp::arg("self"), + bp::arg("block_x"), bp::arg("block_y"), bp::arg("block_z"), + bp::arg("device_api") = hh::DeviceAPI::Default_GPU), + bp::return_internal_reference<1>(), + "Tell Halide that the following dimensions correspond to GPU " + "block indices. 
This is useful for scheduling stages that will " + "run serially within each GPU block. If the selected target is " + "not ptx, this just marks those dimensions as parallel.") + .def("gpu_blocks", &func_gpu_blocks1, + (bp::arg("self"), + bp::arg("block_x"), bp::arg("block_y"), + bp::arg("device_api") = hh::DeviceAPI::Default_GPU), + bp::return_internal_reference<1>()) + .def("gpu_blocks", &func_gpu_blocks0, + (bp::arg("self"), + bp::arg("block_x"), + bp::arg("device_api") = hh::DeviceAPI::Default_GPU), + bp::return_internal_reference<1>()); func_or_stage_class .def("gpu", &func_gpu2, @@ -185,32 +167,31 @@ void defineFuncOrStageGpuMethods(bp::class_ &func_or_stage_class) bp::return_internal_reference<1>()); func_or_stage_class - .def("gpu_tile", &func_gpu_tile2, - (bp::arg("self"), - bp::arg("x"), bp::arg("y"), bp::arg("z"), - bp::arg("x_size"), bp::arg("y_size"), bp::arg("z_size"), - bp::arg("device_api") = hh::DeviceAPI::Default_GPU), - bp::return_internal_reference<1>(), - "Short-hand for tiling a domain and mapping the tile indices " - "to GPU block indices and the coordinates within each tile to " - "GPU thread indices. Consumes the variables given, so do all " - "other scheduling first.") - .def("gpu_tile", &func_gpu_tile1, - (bp::arg("self"), - bp::arg("x"), bp::arg("y"), - bp::arg("x_size"), bp::arg("y_size"), - bp::arg("device_api") = hh::DeviceAPI::Default_GPU), - bp::return_internal_reference<1>()) - .def("gpu_tile", &func_gpu_tile0, - (bp::arg("self"), - bp::arg("x"), bp::arg("x_size"), - bp::arg("device_api") = hh::DeviceAPI::Default_GPU), - bp::return_internal_reference<1>()); + .def("gpu_tile", &func_gpu_tile2, + (bp::arg("self"), + bp::arg("x"), bp::arg("y"), bp::arg("z"), + bp::arg("x_size"), bp::arg("y_size"), bp::arg("z_size"), + bp::arg("device_api") = hh::DeviceAPI::Default_GPU), + bp::return_internal_reference<1>(), + "Short-hand for tiling a domain and mapping the tile indices " + "to GPU block indices and the coordinates within each tile to " + "GPU thread indices. 
Consumes the variables given, so do all " + "other scheduling first.") + .def("gpu_tile", &func_gpu_tile1, + (bp::arg("self"), + bp::arg("x"), bp::arg("y"), + bp::arg("x_size"), bp::arg("y_size"), + bp::arg("device_api") = hh::DeviceAPI::Default_GPU), + bp::return_internal_reference<1>()) + .def("gpu_tile", &func_gpu_tile0, + (bp::arg("self"), + bp::arg("x"), bp::arg("x_size"), + bp::arg("device_api") = hh::DeviceAPI::Default_GPU), + bp::return_internal_reference<1>()); return; } -} // end of namespace func_and_stage_implementation_details - +} // end of namespace func_and_stage_implementation_details -#endif // FUNC_GPU_H +#endif // FUNC_GPU_H diff --git a/python_bindings/python/Function.cpp b/python_bindings/python/Function.cpp index 5594112da52e..b1c98d294518 100644 --- a/python_bindings/python/Function.cpp +++ b/python_bindings/python/Function.cpp @@ -3,12 +3,11 @@ // to avoid compiler confusion, python.hpp must be include before Halide headers #include -#include "../../src/Func.h" // includes everything needed here +#include "../../src/Func.h" // includes everything needed here #include -void defineExternFuncArgument() -{ +void defineExternFuncArgument() { using Halide::ExternFuncArgument; namespace h = Halide; namespace p = boost::python; @@ -30,8 +29,7 @@ void defineExternFuncArgument() .def("is_expr", &ExternFuncArgument::is_expr) .def("is_buffer", &ExternFuncArgument::is_buffer) .def("is_image_param", &ExternFuncArgument::is_image_param) - .def("defined", &ExternFuncArgument::defined) - ; + .def("defined", &ExternFuncArgument::defined); return; } diff --git a/python_bindings/python/Function.h b/python_bindings/python/Function.h index 2088ba53e935..cb7931d27d46 100644 --- a/python_bindings/python/Function.h +++ b/python_bindings/python/Function.h @@ -3,4 +3,4 @@ void defineExternFuncArgument(); -#endif // FUNCTION_H +#endif // FUNCTION_H diff --git a/python_bindings/python/Halide.cpp b/python_bindings/python/Halide.cpp index 836c27f39ba6..2f64f732987b 100644 --- a/python_bindings/python/Halide.cpp +++ b/python_bindings/python/Halide.cpp @@ -6,9 +6,9 @@ #include "Expr.h" #include "Func.h" #include "Function.h" +#include "IROperator.h" #include "Image.h" #include "InlineReductions.h" -#include "IROperator.h" #include "Lambda.h" #include "Param.h" #include "RDom.h" @@ -38,8 +38,7 @@ void defineLlvmHelpers() } */ -BOOST_PYTHON_MODULE(halide) -{ +BOOST_PYTHON_MODULE(halide) { using namespace boost::python; // we include all the pieces and bits from the Halide API diff --git a/python_bindings/python/IROperator.cpp b/python_bindings/python/IROperator.cpp index 358b99ddfbdc..a9d147aacb3e 100644 --- a/python_bindings/python/IROperator.cpp +++ b/python_bindings/python/IROperator.cpp @@ -10,24 +10,18 @@ namespace h = Halide; namespace p = boost::python; - - -h::Expr reinterpret0(h::Type t, h::Expr e) -{ +h::Expr reinterpret0(h::Type t, h::Expr e) { return h::reinterpret(t, e); } -h::Expr cast0(h::Type t, h::Expr e) -{ +h::Expr cast0(h::Type t, h::Expr e) { return Halide::cast(t, e); } -h::Expr select0(h::Expr condition, h::Expr true_value, h::Expr false_value) -{ +h::Expr select0(h::Expr condition, h::Expr true_value, h::Expr false_value) { return h::select(condition, true_value, false_value); } - h::Expr select1(h::Expr c1, h::Expr v1, h::Expr c2, h::Expr v2, h::Expr default_val) { @@ -155,25 +149,19 @@ h::Expr select9(h::Expr c1, h::Expr v1, c10, v10, default_val); } -h::Expr print_when0(h::Expr condition, p::tuple values_passed) -{ +h::Expr print_when0(h::Expr condition, p::tuple 
values_passed) { const size_t num_values = p::len(values_passed); std::vector values; values.reserve(num_values); - for(size_t i=0; i < num_values; i += 1) - { + for (size_t i = 0; i < num_values; i += 1) { p::object o = values_passed[i]; p::extract expr_extract(o); - if(expr_extract.check()) - { + if (expr_extract.check()) { values.push_back(expr_extract()); - } - else - { - for(size_t j=0; j < num_values; j+=1) - { + } else { + for (size_t j = 0; j < num_values; j += 1) { p::object o = values_passed[j]; const std::string o_str = p::extract(p::str(o)); printf("print_when values[%lu] == %s\n", j, o_str.c_str()); @@ -185,48 +173,40 @@ h::Expr print_when0(h::Expr condition, p::tuple values_passed) return h::print_when(condition, values); } -h::Expr random_float0() -{ +h::Expr random_float0() { return h::random_float(); } -h::Expr random_float1(h::Expr seed) -{ +h::Expr random_float1(h::Expr seed) { return h::random_float(seed); } -h::Expr random_int0() -{ +h::Expr random_int0() { return h::random_int(); } -h::Expr random_int1(h::Expr seed) -{ +h::Expr random_int1(h::Expr seed) { return h::random_int(seed); } - -h::Expr undef0(h::Type type) -{ +h::Expr undef0(h::Type type) { return h::undef(type); } -h::Expr memoize_tag0(h::Expr result, const std::vector &cache_key_values) -{ +h::Expr memoize_tag0(h::Expr result, const std::vector &cache_key_values) { return h::memoize_tag(result, cache_key_values); } -void defineOperators() -{ +void defineOperators() { // defined in IROperator.h h::Expr (*max_exprs)(h::Expr, h::Expr) = &h::max; - h::Expr (*max_expr_int)(h::Expr, int) = &h::max; - h::Expr (*max_int_expr)(int, h::Expr) = &h::max; + h::Expr (*max_expr_int)(h::Expr, int) = &h::max; + h::Expr (*max_int_expr)(int, h::Expr) = &h::max; h::Expr (*min_exprs)(h::Expr, h::Expr) = &h::min; - h::Expr (*min_expr_int)(h::Expr, int) = &h::min; - h::Expr (*min_int_expr)(int, h::Expr) = &h::min; + h::Expr (*min_expr_int)(h::Expr, int) = &h::min; + h::Expr (*min_int_expr)(int, h::Expr) = &h::min; p::def("max", max_exprs, p::args("a", "b"), @@ -291,14 +271,12 @@ void defineOperators() "various ways to write this yourself, but they contain numerous " "gotchas and don't always compile to good code, so use this instead."); - p::def("select", &select0, p::args("condition", "true_value", "false_value"), "Returns an expression similar to the ternary operator in C, except " "that it always evaluates all arguments. If the first argument is " "true, then return the second, else return the third. Typically " "vectorizes cleanly, but benefits from SSE41 or newer on x86."); - p::def("select", &select1, p::args("c1", "v1", "c2", "v2", "default_val"), "A multi-way variant of select similar to a switch statement in C, " "which can accept multiple conditions and values in pairs. 
Evaluates " @@ -306,44 +284,44 @@ void defineOperators() "final value if all conditions are false."); p::def("select", &select2, p::args( - "c1", "v1", - "c2", "v2", - "c3", "v3", - "default_val")); + "c1", "v1", + "c2", "v2", + "c3", "v3", + "default_val")); p::def("select", &select3, p::args( - "c1", "v1", - "c2", "v2", - "c3", "v3", - "c4", "v4", - "default_val")); + "c1", "v1", + "c2", "v2", + "c3", "v3", + "c4", "v4", + "default_val")); p::def("select", &select4, p::args( - "c1", "v1", - "c2", "v2", - "c3", "v3", - "c4", "v4", - "c5", "v5", - "default_val")); + "c1", "v1", + "c2", "v2", + "c3", "v3", + "c4", "v4", + "c5", "v5", + "default_val")); p::def("select", &select5, p::args( - "c1", "v1", - "c2", "v2", - "c3", "v3", - "c4", "v4", - "c5", "v5", - "c6", "v6", - "default_val")); + "c1", "v1", + "c2", "v2", + "c3", "v3", + "c4", "v4", + "c5", "v5", + "c6", "v6", + "default_val")); p::def("select", &select6, p::args( - "c1", "v1", - "c2", "v2", - "c3", "v3", - "c4", "v4", - "c5", "v5", - "c6", "v6", - "c7", "v7", - "default_val")); + "c1", "v1", + "c2", "v2", + "c3", "v3", + "c4", "v4", + "c5", "v5", + "c6", "v6", + "c7", "v7", + "default_val")); /* // Too many arguments for boost.python. Hopefully rare enough use case. @@ -426,7 +404,6 @@ void defineOperators() "Return the hyperbolic arcsine of a floating-point expression. If the argument is " "not floating-point, it is cast to Float(32). Does not vectorize well."); - p::def("cosh", &h::cosh, p::args("x"), "Return the hyperbolic cosine of a floating-point expression. If the argument is " "not floating-point, it is cast to Float(32). Does not vectorize well."); @@ -596,7 +573,6 @@ void defineOperators() "Generally, lerp will vectorize as if it were an operation on a type " "twice the bit size of the inferred type for x and y. "); - p::def("popcount", &h::popcount, p::args("x"), "Count the number of set bits in an expression."); @@ -636,12 +612,12 @@ void defineOperators() "elements.\n" "This function vectorizes cleanly."); - p::def("random_float", &random_float0); // no args + p::def("random_float", &random_float0); // no args p::def("random_int", &random_int1, p::args("seed"), "Return a random variable representing a uniformly distributed " "32-bit integer. See \\ref random_float. Vectorizes cleanly."); - p::def("random_int", &random_int0); // no args + p::def("random_int", &random_int0); // no args p::def("undef", &undef0, p::args("type"), "Return an undef value of the given type. 
Halide skips stores that " diff --git a/python_bindings/python/IROperator.h b/python_bindings/python/IROperator.h index f86b5cf42e52..85a9238567b6 100644 --- a/python_bindings/python/IROperator.h +++ b/python_bindings/python/IROperator.h @@ -3,4 +3,4 @@ void defineOperators(); -#endif // IROPERATOR_H +#endif // IROPERATOR_H diff --git a/python_bindings/python/Image.cpp b/python_bindings/python/Image.cpp index 881ba2e0367e..867dc556c8c2 100644 --- a/python_bindings/python/Image.cpp +++ b/python_bindings/python/Image.cpp @@ -1,8 +1,8 @@ #include "Image.h" // to avoid compiler confusion, python.hpp must be include before Halide headers -#include #include +#include #define USE_NUMPY @@ -13,21 +13,20 @@ // we use Halide::numpy #include "../numpy/numpy.hpp" #endif -#endif // USE_NUMPY +#endif // USE_NUMPY #include -#include #include +#include #include "../../src/runtime/HalideBuffer.h" -#include "../../src/Buffer.h" -#include "Type.h" #include "Func.h" +#include "Type.h" -#include -#include -#include #include +#include +#include +#include namespace h = Halide; namespace p = boost::python; @@ -38,14 +37,14 @@ namespace bn = boost::numpy; #else namespace bn = Halide::numpy; #endif -#endif // USE_NUMPY +#endif // USE_NUMPY -template +template Ret image_call_operator(h::Image &that, Args... args) { return that(args...); } -template +template h::Expr image_call_operator_tuple(h::Image &that, p::tuple &args_passed) { std::vector expr_args; for (ssize_t i = 0; i < p::len(args_passed); i++) { @@ -54,31 +53,31 @@ h::Expr image_call_operator_tuple(h::Image &that, p::tuple &args_passed) { return that(expr_args); } -template +template T image_to_setitem_operator0(h::Image &that, int x, T value) { return that(x) = value; } -template +template T image_to_setitem_operator1(h::Image &that, int x, int y, T value) { return that(x, y) = value; } -template +template T image_to_setitem_operator2(h::Image &that, int x, int y, int z, T value) { return that(x, y, z) = value; } -template +template T image_to_setitem_operator3(h::Image &that, int x, int y, int z, int w, T value) { return that(x, y, z, w) = value; } -template +template T image_to_setitem_operator4(h::Image &that, p::tuple &args_passed, T value) { std::vector int_args; const size_t args_len = p::len(args_passed); - for (size_t i=0; i < args_len; i += 1) { + for (size_t i = 0; i < args_len; i += 1) { p::object o = args_passed[i]; p::extract int32_extract(o); @@ -97,7 +96,7 @@ T image_to_setitem_operator4(h::Image &that, p::tuple &args_passed, T value) "a tuple of (convertible to) int."); } - switch(int_args.size()) { + switch (int_args.size()) { case 1: return that(int_args[0]) = value; case 2: @@ -111,35 +110,35 @@ T image_to_setitem_operator4(h::Image &that, p::tuple &args_passed, T value) throw std::invalid_argument("image_to_setitem_operator4 only handles 1 to 4 dimensional tuples"); } - return 0; // this line should never be reached + return 0; // this line should never be reached } -template +template const T *image_data(const h::Image &image) { return image.data(); } -template +template void image_set_min1(h::Image &im, int m0) { im.set_min(m0); } -template +template void image_set_min2(h::Image &im, int m0, int m1) { im.set_min(m0, m1); } -template +template void image_set_min3(h::Image &im, int m0, int m1, int m2) { im.set_min(m0, m1, m2); } -template +template void image_set_min4(h::Image &im, int m0, int m1, int m2, int m3) { im.set_min(m0, m1, m2, m3); } -template +template std::string image_repr(const h::Image &image) { std::string repr; @@ 
-160,17 +159,13 @@ std::string image_repr(const h::Image &image) { boost::format f(""); - repr = boost::str(f % suffix % t.bits() % t.bytes() - % image.extent(0) % image.extent(1) % image.extent(2) % image.extent(3) - % image.min(0) % image.min(1) % image.min(2) % image.min(3) - % image.stride(0) % image.stride(1) % image.stride(2) % image.stride(3)); + repr = boost::str(f % suffix % t.bits() % t.bytes() % image.extent(0) % image.extent(1) % image.extent(2) % image.extent(3) % image.min(0) % image.min(1) % image.min(2) % image.min(3) % image.stride(0) % image.stride(1) % image.stride(2) % image.stride(3)); return repr; } -template -boost::python::object get_type_function_wrapper() -{ +template +boost::python::object get_type_function_wrapper() { std::function &)> return_type_func = [&](h::Image &that) -> h::Type { return halide_type_of(); }; auto call_policies = p::default_call_policies(); @@ -178,14 +173,13 @@ boost::python::object get_type_function_wrapper() return p::make_function(return_type_func, call_policies, p::arg("self"), func_sig()); } -template +template void image_copy_to_host(h::Image &im) { im.copy_to_host(); } -template -void defineImage_impl(const std::string suffix, const h::Type type) -{ +template +void defineImage_impl(const std::string suffix, const h::Type type) { using h::Image; using h::Expr; @@ -204,35 +198,35 @@ void defineImage_impl(const std::string suffix, const h::Type type) // Constructors image_class .def(p::init( - p::args("self", "x"), - "Allocate an image with the given dimensions.")) + p::args("self", "x"), + "Allocate an image with the given dimensions.")) .def(p::init( - p::args("self", "x", "y"), - "Allocate an image with the given dimensions.")) + p::args("self", "x", "y"), + "Allocate an image with the given dimensions.")) .def(p::init( - p::args("self", "x", "y", "z"), - "Allocate an image with the given dimensions.")) + p::args("self", "x", "y", "z"), + "Allocate an image with the given dimensions.")) .def(p::init( - p::args("self", "x", "y", "z", "w"), - "Allocate an image with the given dimensions.")) + p::args("self", "x", "y", "z", "w"), + "Allocate an image with the given dimensions.")) .def(p::init( - p::args("self", "r"), - "Wrap a single-element realization in an Image object.")) + p::args("self", "r"), + "Wrap a single-element realization in an Image object.")) .def(p::init( - p::args("self", "b"), - "Wrap a buffer_t in an Image object, so that we can access its pixels.")); + p::args("self", "b"), + "Wrap a buffer_t in an Image object, so that we can access its pixels.")); image_class .def("__repr__", &image_repr, p::arg("self")); image_class .def("data", &image_data, p::arg("self"), - p::return_value_policy< p::return_opaque_pointer >(), // not sure this will do what we want + p::return_value_policy(), // not sure this will do what we want "Get a pointer to the element at the min location.") .def("copy_to_host", &image_copy_to_host, p::arg("self"), @@ -357,8 +351,7 @@ void defineImage_impl(const std::string suffix, const h::Type type) "Assuming this image is four-dimensional, set the value of the element at position (x, y, z, w)") .def("__setitem__", &image_to_setitem_operator4, p::args("self", "tuple", "value"), "Assuming this image is one to four-dimensional, " - "set the value of the element at position indicated by tuple (x, y, z, w)") - ; + "set the value of the element at position indicated by tuple (x, y, z, w)"); p::implicitly_convertible, h::Argument>(); @@ -399,14 +392,14 @@ p::object image_to_python_object(const h::Image<> 
&im) { } h::Image<> python_object_to_image(p::object obj) { - p::extract> image_extract_uint8(obj); + p::extract> image_extract_uint8(obj); p::extract> image_extract_uint16(obj); p::extract> image_extract_uint32(obj); - p::extract> image_extract_int8(obj); - p::extract> image_extract_int16(obj); - p::extract> image_extract_int32(obj); - p::extract> image_extract_float(obj); - p::extract> image_extract_double(obj); + p::extract> image_extract_int8(obj); + p::extract> image_extract_int16(obj); + p::extract> image_extract_int32(obj); + p::extract> image_extract_float(obj); + p::extract> image_extract_double(obj); if (image_extract_uint8.check()) { return image_extract_uint8(); @@ -432,14 +425,13 @@ h::Image<> python_object_to_image(p::object obj) { #ifdef USE_NUMPY - bn::dtype type_to_dtype(const h::Type &t) { - if (t == h::UInt(8)) return bn::dtype::get_builtin(); - if (t == h::UInt(16)) return bn::dtype::get_builtin(); - if (t == h::UInt(32)) return bn::dtype::get_builtin(); - if (t == h::Int(8)) return bn::dtype::get_builtin(); - if (t == h::Int(16)) return bn::dtype::get_builtin(); - if (t == h::Int(32)) return bn::dtype::get_builtin(); + if (t == h::UInt(8)) return bn::dtype::get_builtin(); + if (t == h::UInt(16)) return bn::dtype::get_builtin(); + if (t == h::UInt(32)) return bn::dtype::get_builtin(); + if (t == h::Int(8)) return bn::dtype::get_builtin(); + if (t == h::Int(16)) return bn::dtype::get_builtin(); + if (t == h::Int(32)) return bn::dtype::get_builtin(); if (t == h::Float(32)) return bn::dtype::get_builtin(); if (t == h::Float(64)) return bn::dtype::get_builtin(); throw std::runtime_error("type_to_dtype received a Halide::Type with no known numpy dtype equivalent"); @@ -447,21 +439,20 @@ bn::dtype type_to_dtype(const h::Type &t) { } h::Type dtype_to_type(const bn::dtype &t) { - if (t == bn::dtype::get_builtin()) return h::UInt(8); + if (t == bn::dtype::get_builtin()) return h::UInt(8); if (t == bn::dtype::get_builtin()) return h::UInt(16); if (t == bn::dtype::get_builtin()) return h::UInt(32); - if (t == bn::dtype::get_builtin()) return h::Int(8); - if (t == bn::dtype::get_builtin()) return h::Int(16); - if (t == bn::dtype::get_builtin()) return h::Int(32); - if (t == bn::dtype::get_builtin()) return h::Float(32); - if (t == bn::dtype::get_builtin()) return h::Float(64); + if (t == bn::dtype::get_builtin()) return h::Int(8); + if (t == bn::dtype::get_builtin()) return h::Int(16); + if (t == bn::dtype::get_builtin()) return h::Int(32); + if (t == bn::dtype::get_builtin()) return h::Float(32); + if (t == bn::dtype::get_builtin()) return h::Float(64); throw std::runtime_error("dtype_to_type received a numpy type with no known Halide type equivalent"); return h::Type(); } /// Will create a Halide::Image object pointing to the array data -p::object ndarray_to_image(bn::ndarray &array) -{ +p::object ndarray_to_image(bn::ndarray &array) { h::Type t = dtype_to_type(array.get_dtype()); const int dims = array.get_nd(); void *host = reinterpret_cast(array.get_data()); @@ -475,7 +466,6 @@ p::object ndarray_to_image(bn::ndarray &array) return image_to_python_object(h::Image<>(t, host, dims, shape)); } - bn::ndarray image_to_ndarray(p::object image_object) { h::Image<> im = python_object_to_image(image_object); @@ -496,30 +486,29 @@ bn::ndarray image_to_ndarray(p::object image_object) { image_object); } - #endif struct ImageFactory { - template - static p::object create_image_object(Args...args) { + template + static p::object create_image_object(Args... 
args) { typedef h::Image ImageType; typedef typename p::manage_new_object::apply::type converter_t; converter_t converter; - PyObject* obj = converter(new ImageType(args...)); + PyObject *obj = converter(new ImageType(args...)); return p::object(p::handle<>(obj)); } - template + template static p::object create_image_impl(h::Type t, Args... args) { - if (t == h::UInt(8)) return create_image_object(args...); - if (t == h::UInt(16)) return create_image_object(args...); - if (t == h::UInt(32)) return create_image_object(args...); - if (t == h::Int(8)) return create_image_object(args...); - if (t == h::Int(16)) return create_image_object(args...); - if (t == h::Int(32)) return create_image_object(args...); - if (t == h::Float(32)) return create_image_object(args...); - if (t == h::Float(64)) return create_image_object(args...); + if (t == h::UInt(8)) return create_image_object(args...); + if (t == h::UInt(16)) return create_image_object(args...); + if (t == h::UInt(32)) return create_image_object(args...); + if (t == h::Int(8)) return create_image_object(args...); + if (t == h::Int(16)) return create_image_object(args...); + if (t == h::Int(32)) return create_image_object(args...); + if (t == h::Float(32)) return create_image_object(args...); + if (t == h::Float(64)) return create_image_object(args...); throw std::invalid_argument("ImageFactory::create_image_impl received type not handled"); return p::object(); } @@ -551,13 +540,9 @@ struct ImageFactory { static p::object create_image_from_buffer(h::Type type, buffer_t b) { return create_image_impl(type, b); } - }; - - -void defineImage() -{ +void defineImage() { defineImage_impl("_uint8", h::UInt(8)); defineImage_impl("_uint16", h::UInt(16)); defineImage_impl("_uint32", h::UInt(32)); @@ -569,7 +554,6 @@ void defineImage() defineImage_impl("_float32", h::Float(32)); defineImage_impl("_float64", h::Float(64)); - // "Image" will look as a class, but instead it will be simply a factory method p::def("Image", &ImageFactory::create_image0, p::args("type"), @@ -589,12 +573,12 @@ void defineImage() p::def("Image", &ImageFactory::create_image_from_realization, p::args("type", "r"), - p::with_custodian_and_ward_postcall<0, 2>(), // the realization reference count is increased + p::with_custodian_and_ward_postcall<0, 2>(), // the realization reference count is increased "Wrap a single-element realization in an Image object of type T."); p::def("Image", &ImageFactory::create_image_from_buffer, p::args("type", "b"), - p::with_custodian_and_ward_postcall<0, 2>(), // the buffer_t reference count is increased + p::with_custodian_and_ward_postcall<0, 2>(), // the buffer_t reference count is increased "Wrap a buffer_t in an Image object of type T, so that we can access its pixels."); #ifdef USE_NUMPY @@ -602,21 +586,21 @@ void defineImage() p::def("ndarray_to_image", &ndarray_to_image, p::args("array"), - p::with_custodian_and_ward_postcall<0, 1>(), // the array reference count is increased + p::with_custodian_and_ward_postcall<0, 1>(), // the array reference count is increased "Converts a numpy array into a Halide::Image." "Will take into account the array size, dimensions, and type." "Created Image refers to the array data (no copy)."); p::def("Image", &ndarray_to_image, p::args("array"), - p::with_custodian_and_ward_postcall<0, 1>(), // the array reference count is increased + p::with_custodian_and_ward_postcall<0, 1>(), // the array reference count is increased "Wrap numpy array in a Halide::Image." 
"Will take into account the array size, dimensions, and type." "Created Image refers to the array data (no copy)."); p::def("image_to_ndarray", &image_to_ndarray, p::args("image"), - p::with_custodian_and_ward_postcall<0, 1>(), // the image reference count is increased + p::with_custodian_and_ward_postcall<0, 1>(), // the image reference count is increased "Creates a numpy array from a Halide::Image." "Will take into account the Image size, dimensions, and type." "Created ndarray refers to the Image data (no copy)."); diff --git a/python_bindings/python/Image.h b/python_bindings/python/Image.h index fdf3c9043b66..2b9961a16a30 100644 --- a/python_bindings/python/Image.h +++ b/python_bindings/python/Image.h @@ -1,11 +1,11 @@ #ifndef IMAGE_H #define IMAGE_H -#include #include "../../src/runtime/HalideBuffer.h" +#include void defineImage(); boost::python::object image_to_python_object(const Halide::Image<> &); Halide::Image<> python_object_to_image(boost::python::object); -#endif // IMAGE_H +#endif // IMAGE_H diff --git a/python_bindings/python/InlineReductions.cpp b/python_bindings/python/InlineReductions.cpp index e55e7492c4ec..a5c89e8d7ddb 100644 --- a/python_bindings/python/InlineReductions.cpp +++ b/python_bindings/python/InlineReductions.cpp @@ -36,7 +36,6 @@ h::Expr maximum1(h::RDom r, h::Expr e, const std::string name) { return h::maximum(r, e, name); } - h::Expr minimum0(h::Expr e, const std::string name) { return h::minimum(e, name); } @@ -61,38 +60,37 @@ p::object argmax1(h::RDom r, h::Expr e, const std::string name) { return expr_vector_to_python_tuple(h::argmax(r, e, name).as_vector()); } - void defineInlineReductions() { // Defines some inline reductions: sum, product, minimum, maximum. - p::def("sum", &sum0, (p::arg("e"), p::arg("name")="sum"), + p::def("sum", &sum0, (p::arg("e"), p::arg("name") = "sum"), "An inline reduction."); - p::def("sum", &sum1, (p::arg("r"), p::arg("e"), p::arg("name")="sum"), + p::def("sum", &sum1, (p::arg("r"), p::arg("e"), p::arg("name") = "sum"), "An inline reduction."); - p::def("product", &product0, (p::arg("e"), p::arg("name")="product"), + p::def("product", &product0, (p::arg("e"), p::arg("name") = "product"), "An inline reduction."); - p::def("product", &product1, (p::arg("r"), p::arg("e"), p::arg("name")="product"), + p::def("product", &product1, (p::arg("r"), p::arg("e"), p::arg("name") = "product"), "An inline reduction."); - p::def("maximum", &maximum0, (p::arg("e"), p::arg("name")="maximum"), + p::def("maximum", &maximum0, (p::arg("e"), p::arg("name") = "maximum"), "An inline reduction."); - p::def("maximum", &maximum1, (p::arg("r"), p::arg("e"), p::arg("name")="maximum"), + p::def("maximum", &maximum1, (p::arg("r"), p::arg("e"), p::arg("name") = "maximum"), "An inline reduction."); - p::def("minimum", &minimum0, (p::arg("e"), p::arg("name")="minimum"), + p::def("minimum", &minimum0, (p::arg("e"), p::arg("name") = "minimum"), "An inline reduction."); - p::def("minimum", &minimum1, (p::arg("r"), p::arg("e"), p::arg("name")="minimum"), + p::def("minimum", &minimum1, (p::arg("r"), p::arg("e"), p::arg("name") = "minimum"), "An inline reduction."); - p::def("argmin", &argmin0, (p::arg("e"), p::arg("name")="argmin"), + p::def("argmin", &argmin0, (p::arg("e"), p::arg("name") = "argmin"), "An inline reduction."); - p::def("argmin", &argmin1, (p::arg("r"), p::arg("e"), p::arg("name")="argmin"), + p::def("argmin", &argmin1, (p::arg("r"), p::arg("e"), p::arg("name") = "argmin"), "An inline reduction."); - p::def("argmax", &argmax0, (p::arg("e"), 
p::arg("name")="argmax"), + p::def("argmax", &argmax0, (p::arg("e"), p::arg("name") = "argmax"), "An inline reduction."); - p::def("argmax", &argmax1, (p::arg("r"), p::arg("e"), p::arg("name")="argmax"), + p::def("argmax", &argmax1, (p::arg("r"), p::arg("e"), p::arg("name") = "argmax"), "An inline reduction."); return; diff --git a/python_bindings/python/InlineReductions.h b/python_bindings/python/InlineReductions.h index 55ef35b681e1..ac778b515d4b 100644 --- a/python_bindings/python/InlineReductions.h +++ b/python_bindings/python/InlineReductions.h @@ -3,4 +3,4 @@ void defineInlineReductions(); -#endif // INLINEREDUCTIONS_H +#endif // INLINEREDUCTIONS_H diff --git a/python_bindings/python/Lambda.cpp b/python_bindings/python/Lambda.cpp index 8e738fdf6411..8ac6e9633398 100644 --- a/python_bindings/python/Lambda.cpp +++ b/python_bindings/python/Lambda.cpp @@ -7,42 +7,34 @@ namespace h = Halide; -h::Func lambda0D(h::Expr e) -{ +h::Func lambda0D(h::Expr e) { return lambda(e); } -h::Func lambda1D(h::Var x, h::Expr e) -{ +h::Func lambda1D(h::Var x, h::Expr e) { return lambda(x, e); } -h::Func lambda2D(h::Var x, h::Var y, h::Expr e) -{ +h::Func lambda2D(h::Var x, h::Var y, h::Expr e) { return lambda(x, y, e); } -h::Func lambda3D(h::Var x, h::Var y, h::Var z, h::Expr e) -{ +h::Func lambda3D(h::Var x, h::Var y, h::Var z, h::Expr e) { return lambda(x, y, z, e); } -h::Func lambda4D(h::Var x, h::Var y, h::Var z, h::Var w, h::Expr e) -{ +h::Func lambda4D(h::Var x, h::Var y, h::Var z, h::Var w, h::Expr e) { return lambda(x, y, z, w, e); } -h::Func lambda5D(h::Var x, h::Var y, h::Var z, h::Var w, h::Var v, h::Expr e) -{ +h::Func lambda5D(h::Var x, h::Var y, h::Var z, h::Var w, h::Var v, h::Expr e) { return lambda(x, y, z, w, v, e); } - /// Convenience functions for creating small anonymous Halide functions. /// See test/lambda.cpp for example usage. /// lambda is a python keyword so we used lambda0D, lambda1D, ... lambda5D instead. -void defineLambda() -{ +void defineLambda() { namespace p = boost::python; p::def("lambda0D", &lambda0D, p::arg("e"), @@ -82,5 +74,3 @@ void defineLambda() return; } - - diff --git a/python_bindings/python/Lambda.h b/python_bindings/python/Lambda.h index da8b9d3c5d76..47bdaa36214b 100644 --- a/python_bindings/python/Lambda.h +++ b/python_bindings/python/Lambda.h @@ -3,4 +3,4 @@ void defineLambda(); -#endif // LAMBDA_H +#endif // LAMBDA_H diff --git a/python_bindings/python/Param.cpp b/python_bindings/python/Param.cpp index 6ef1934b1851..3f26efcf673e 100644 --- a/python_bindings/python/Param.cpp +++ b/python_bindings/python/Param.cpp @@ -2,39 +2,36 @@ #include "Param.h" // to avoid compiler confusion, python.hpp must be include before Halide headers -#include -#include #include "add_operators.h" +#include +#include -#include "../../src/Param.h" -#include "../../src/OutputImageParam.h" +#include "../../src/IROperator.h" // enables Param + Expr operations (which include is it ?) #include "../../src/ImageParam.h" -#include "../../src/IROperator.h" // enables Param + Expr operations (which include is it ?) +#include "../../src/OutputImageParam.h" +#include "../../src/Param.h" #include "Type.h" #include -#include #include +#include namespace h = Halide; namespace p = boost::python; -h::Expr imageparam_to_expr_operator0(h::ImageParam &that, p::tuple args_passed) -{ +h::Expr imageparam_to_expr_operator0(h::ImageParam &that, p::tuple args_passed) { std::vector expr_args; // All ImageParam operator()(...) 
Expr and Var variants end up building a vector // all other variants are equivalent to this one const size_t args_len = p::len(args_passed); - for(size_t i=0; i < args_len; i+=1) - { + for (size_t i = 0; i < args_len; i += 1) { expr_args.push_back(p::extract(args_passed[i])); } return that(expr_args); } -h::Expr imageparam_to_expr_operator1(h::ImageParam &that, h::Expr an_expr) -{ +h::Expr imageparam_to_expr_operator1(h::ImageParam &that, h::Expr an_expr) { std::vector expr_args; expr_args.push_back(an_expr); // All ImageParam operator()(...) Expr and Var variants end up building a vector @@ -42,21 +39,16 @@ h::Expr imageparam_to_expr_operator1(h::ImageParam &that, h::Expr an_expr) return that(expr_args); } -std::string imageparam_repr(h::ImageParam ¶m) // non-const due to a Halide bug in master (to be fixed) +std::string imageparam_repr(h::ImageParam ¶m) // non-const due to a Halide bug in master (to be fixed) { std::string repr; const h::Type &t = param.type(); - if (param.defined()) - { + if (param.defined()) { boost::format f(""); repr = boost::str(f % param.name()); - } - else - { + } else { boost::format f(""); - repr = boost::str(f % param.name() - % type_code_to_string(t) % t.bits() - % param.extent(0) % param.extent(1) % param.extent(2) % param.extent(3)); + repr = boost::str(f % param.name() % type_code_to_string(t) % t.bits() % param.extent(0) % param.extent(1) % param.extent(2) % param.extent(3)); } return repr; } @@ -65,86 +57,84 @@ h::Image<> image_param_get(h::ImageParam ¶m) { return param.get(); } -template +template void image_param_set(h::ImageParam ¶m, const h::Image &im) { param.set(im); } -void defineImageParam() -{ +void defineImageParam() { using Halide::ImageParam; - auto image_param_class = p::class_("ImageParam", "An Image parameter to a halide pipeline. E.g., the input image. \n" "Constructor:: \n" - " ImageParam(Type t, int dims, name="") \n" + " ImageParam(Type t, int dims, name=" + ") \n" "The image can be indexed via I[x], I[y,x], etc, which gives a Halide Expr. 
" "Supports most of the methods of Image.", p::init(p::args("self", "t", "dims", "name"))) - .def(p::init(p::args("self", "t", "dims"))) - .def("name", &ImageParam::name, p::arg("self"), - p::return_value_policy(), - "Get name of ImageParam.") - - .def("dimensions", &ImageParam::dimensions, p::arg("self"), - "Get the dimensionality of this image parameter") - .def("channels", &ImageParam::channels, p::arg("self"), - "Get an expression giving the extent in dimension 2, " - "which by convention is the channel-count of the image") - - .def("width", &ImageParam::width, p::arg("self"), - "Get an expression giving the extent in dimension 0, which by " - "convention is the width of the image") - .def("height", &ImageParam::height, p::arg("self"), - "Get an expression giving the extent in dimension 1, which by " - "convention is the height of the image") - - .def("left", &ImageParam::left, p::arg("self"), - "Get an expression giving the minimum coordinate in dimension 0, which " - "by convention is the coordinate of the left edge of the image") - .def("right", &ImageParam::right, p::arg("self"), - "Get an expression giving the maximum coordinate in dimension 0, which " - "by convention is the coordinate of the right edge of the image") - .def("top", &ImageParam::top, p::arg("self"), - "Get an expression giving the minimum coordinate in dimension 1, which " - "by convention is the top of the image") - .def("bottom", &ImageParam::bottom, p::arg("self"), - "Get an expression giving the maximum coordinate in dimension 1, which " - "by convention is the bottom of the image") - - .def("set", &image_param_set, p::args("self", "im"), - "Bind a buffer to this ImageParam. Only relevant for jitting.") - .def("set", &image_param_set, p::args("self", "im"), - "Bind a buffer to this ImageParam. Only relevant for jitting.") - .def("set", &image_param_set, p::args("self", "im"), - "Bind a buffer to this ImageParam. Only relevant for jitting.") - .def("set", &image_param_set, p::args("self", "im"), - "Bind a buffer to this IageParam. Only relevant for jitting.") - .def("set", &image_param_set, p::args("self", "im"), - "Bind a buffer to this ImageParam. Only relevant for jitting.") - .def("set", &image_param_set, p::args("self", "im"), - "Bind a buffer to this ImageParam. Only relevant for jitting.") - .def("set", &image_param_set, p::args("self", "im"), - "Bind a buffer to this ImageParam. Only relevant for jitting.") - .def("set", &image_param_set, p::args("self", "im"), - "Bind a buffer to this ImageParam. Only relevant for jitting.") - .def("get", &image_param_get, p::arg("self"), - "Get the buffer bound to this ImageParam. Only relevant for jitting.") - .def("__getitem__", &imageparam_to_expr_operator0, p::args("self", "tuple"), - "Construct an expression which loads from this image. " - "The location is extended with enough implicit variables to match " - "the dimensionality of the image (see \\ref Var::implicit).\n\n" - "Call with: [x], [x,y], [x,y,z], or [x,y,z,w]") - .def("__getitem__", &imageparam_to_expr_operator1, p::args("self", "expr"), - "Construct an expression which loads from this image. 
" - "The location is extended with enough implicit variables to match " - "the dimensionality of the image (see \\ref Var::implicit).\n\n" - "Call with: [x], [x,y], [x,y,z], or [x,y,z,w]") - - .def("__repr__", &imageparam_repr, p::arg("self")) - ; + .def(p::init(p::args("self", "t", "dims"))) + .def("name", &ImageParam::name, p::arg("self"), + p::return_value_policy(), + "Get name of ImageParam.") + + .def("dimensions", &ImageParam::dimensions, p::arg("self"), + "Get the dimensionality of this image parameter") + .def("channels", &ImageParam::channels, p::arg("self"), + "Get an expression giving the extent in dimension 2, " + "which by convention is the channel-count of the image") + + .def("width", &ImageParam::width, p::arg("self"), + "Get an expression giving the extent in dimension 0, which by " + "convention is the width of the image") + .def("height", &ImageParam::height, p::arg("self"), + "Get an expression giving the extent in dimension 1, which by " + "convention is the height of the image") + + .def("left", &ImageParam::left, p::arg("self"), + "Get an expression giving the minimum coordinate in dimension 0, which " + "by convention is the coordinate of the left edge of the image") + .def("right", &ImageParam::right, p::arg("self"), + "Get an expression giving the maximum coordinate in dimension 0, which " + "by convention is the coordinate of the right edge of the image") + .def("top", &ImageParam::top, p::arg("self"), + "Get an expression giving the minimum coordinate in dimension 1, which " + "by convention is the top of the image") + .def("bottom", &ImageParam::bottom, p::arg("self"), + "Get an expression giving the maximum coordinate in dimension 1, which " + "by convention is the bottom of the image") + + .def("set", &image_param_set, p::args("self", "im"), + "Bind a buffer to this ImageParam. Only relevant for jitting.") + .def("set", &image_param_set, p::args("self", "im"), + "Bind a buffer to this ImageParam. Only relevant for jitting.") + .def("set", &image_param_set, p::args("self", "im"), + "Bind a buffer to this ImageParam. Only relevant for jitting.") + .def("set", &image_param_set, p::args("self", "im"), + "Bind a buffer to this IageParam. Only relevant for jitting.") + .def("set", &image_param_set, p::args("self", "im"), + "Bind a buffer to this ImageParam. Only relevant for jitting.") + .def("set", &image_param_set, p::args("self", "im"), + "Bind a buffer to this ImageParam. Only relevant for jitting.") + .def("set", &image_param_set, p::args("self", "im"), + "Bind a buffer to this ImageParam. Only relevant for jitting.") + .def("set", &image_param_set, p::args("self", "im"), + "Bind a buffer to this ImageParam. Only relevant for jitting.") + .def("get", &image_param_get, p::arg("self"), + "Get the buffer bound to this ImageParam. Only relevant for jitting.") + .def("__getitem__", &imageparam_to_expr_operator0, p::args("self", "tuple"), + "Construct an expression which loads from this image. " + "The location is extended with enough implicit variables to match " + "the dimensionality of the image (see \\ref Var::implicit).\n\n" + "Call with: [x], [x,y], [x,y,z], or [x,y,z,w]") + .def("__getitem__", &imageparam_to_expr_operator1, p::args("self", "expr"), + "Construct an expression which loads from this image. 
" + "The location is extended with enough implicit variables to match " + "the dimensionality of the image (see \\ref Var::implicit).\n\n" + "Call with: [x], [x,y], [x,y,z], or [x,y,z,w]") + + .def("__repr__", &imageparam_repr, p::arg("self")); p::implicitly_convertible(); @@ -154,9 +144,7 @@ void defineImageParam() return; } - -void defineOutputImageParam() -{ +void defineOutputImageParam() { //"A handle on the output buffer of a pipeline. Used to make static // "promises about the output size and stride." @@ -285,15 +273,13 @@ void defineOutputImageParam() return; } -template -h::Expr param_as_expr(h::Param &that) -{ +template +h::Expr param_as_expr(h::Param &that) { return static_cast(that); } -template -std::string param_repr(const h::Param ¶m) -{ +template +std::string param_repr(const h::Param ¶m) { std::string repr; const h::Type &t = param.type(); boost::format f(""); @@ -302,88 +288,86 @@ std::string param_repr(const h::Param ¶m) return repr; } -template -void defineParam_impl(const std::string suffix, const h::Type type) -{ +template +void defineParam_impl(const std::string suffix, const h::Type type) { using Halide::Param; auto param_class = - p::class_>(("Param"+ suffix).c_str(), - "A scalar parameter to a halide pipeline. If you're jitting, this " - "should be bound to an actual value of type T using the set method " - "before you realize the function uses this. If you're statically " - "compiling, this param should appear in the argument list.", - p::init<>( - p::arg("self"), - "Construct a scalar parameter of type T with a unique auto-generated name")); + p::class_>(("Param" + suffix).c_str(), + "A scalar parameter to a halide pipeline. If you're jitting, this " + "should be bound to an actual value of type T using the set method " + "before you realize the function uses this. If you're statically " + "compiling, this param should appear in the argument list.", + p::init<>( + p::arg("self"), + "Construct a scalar parameter of type T with a unique auto-generated name")); param_class - .def(p::init( - p::args("self", "val"), - "Construct a scalar parameter of type T an initial value of " - "'val'. Only triggers for scalar types.")) - .def(p::init( - p::args("self", "name"), "Construct a scalar parameter of type T with the given name.")) - .def(p::init( - p::args("self", "name", "val"), - "Construct a scalar parameter of type T with the given name " - "and an initial value of 'val'.")) - .def(p::init( - p::args("self", "val", "min", "max"), - "Construct a scalar parameter of type T with an initial value of 'val' " - "and a given min and max.")) - .def(p::init( - p::args("self", "name", "val", "min", "max"), - "Construct a scalar parameter of type T with the given name " - "and an initial value of 'val' and a given min and max.")) - - .def("name", &Param::name, p::arg("self"), - p::return_value_policy(), - "Get the name of this parameter") - .def("is_explicit_name", &Param::is_explicit_name, p::arg("self"), - "Return true iff the name was explicitly specified in the ctor (vs autogenerated).") - - .def("get", &Param::get, p::arg("self"), - "Get the current value of this parameter. Only meaningful when jitting.") - .def("set", &Param::set, p::args("self", "val"), - "Set the current value of this parameter. Only meaningful when jitting") - // .def("get_address", &Param::get_address, p::arg("self"), - // "Get a pointer to the location that stores the current value of - // "this parameter. 
Only meaningful for jitting.") - - .def("type", &Param::type, p::arg("self"), - "Get the halide type of T") - - .def("set_range", &Param::set_range, p::args("self", "min", "max"), - "Get or set the possible range of this parameter. " - "Use undefined Exprs to mean unbounded.") - .def("set_min_value", &Param::set_min_value, p::args("self", "min"), - "Get or set the possible range of this parameter. " - "Use undefined Exprs to mean unbounded.") - .def("set_max_value", &Param::set_max_value, p::args("self", "max"), - "Get or set the possible range of this parameter. " - "Use undefined Exprs to mean unbounded.") - .def("get_min_value", &Param::get_min_value, p::arg("self")) - .def("get_max_value", &Param::get_max_value, p::arg("self")) - - .def("expr", ¶m_as_expr, p::arg("self"), - "You can use this parameter as an expression in a halide " - "function definition") - - // "You can use this parameter as an expression in a halide - // "function definition" - // operator Expr() const - - // "Using a param as the argument to an external stage treats it - // "as an Expr" - // operator ExternFuncArgument() const - - // "Construct the appropriate argument matching this parameter, - // "for the purpose of generating the right type signature when - // "statically compiling halide pipelines." - // operator Argument() const - - .def("__repr__", ¶m_repr, p::arg("self")) - ; + .def(p::init( + p::args("self", "val"), + "Construct a scalar parameter of type T an initial value of " + "'val'. Only triggers for scalar types.")) + .def(p::init( + p::args("self", "name"), "Construct a scalar parameter of type T with the given name.")) + .def(p::init( + p::args("self", "name", "val"), + "Construct a scalar parameter of type T with the given name " + "and an initial value of 'val'.")) + .def(p::init( + p::args("self", "val", "min", "max"), + "Construct a scalar parameter of type T with an initial value of 'val' " + "and a given min and max.")) + .def(p::init( + p::args("self", "name", "val", "min", "max"), + "Construct a scalar parameter of type T with the given name " + "and an initial value of 'val' and a given min and max.")) + + .def("name", &Param::name, p::arg("self"), + p::return_value_policy(), + "Get the name of this parameter") + .def("is_explicit_name", &Param::is_explicit_name, p::arg("self"), + "Return true iff the name was explicitly specified in the ctor (vs autogenerated).") + + .def("get", &Param::get, p::arg("self"), + "Get the current value of this parameter. Only meaningful when jitting.") + .def("set", &Param::set, p::args("self", "val"), + "Set the current value of this parameter. Only meaningful when jitting") + // .def("get_address", &Param::get_address, p::arg("self"), + // "Get a pointer to the location that stores the current value of + // "this parameter. Only meaningful for jitting.") + + .def("type", &Param::type, p::arg("self"), + "Get the halide type of T") + + .def("set_range", &Param::set_range, p::args("self", "min", "max"), + "Get or set the possible range of this parameter. " + "Use undefined Exprs to mean unbounded.") + .def("set_min_value", &Param::set_min_value, p::args("self", "min"), + "Get or set the possible range of this parameter. " + "Use undefined Exprs to mean unbounded.") + .def("set_max_value", &Param::set_max_value, p::args("self", "max"), + "Get or set the possible range of this parameter. 
" + "Use undefined Exprs to mean unbounded.") + .def("get_min_value", &Param::get_min_value, p::arg("self")) + .def("get_max_value", &Param::get_max_value, p::arg("self")) + + .def("expr", ¶m_as_expr, p::arg("self"), + "You can use this parameter as an expression in a halide " + "function definition") + + // "You can use this parameter as an expression in a halide + // "function definition" + // operator Expr() const + + // "Using a param as the argument to an external stage treats it + // "as an Expr" + // operator ExternFuncArgument() const + + // "Construct the appropriate argument matching this parameter, + // "for the purpose of generating the right type signature when + // "statically compiling halide pipelines." + // operator Argument() const + + .def("__repr__", ¶m_repr, p::arg("self")); p::implicitly_convertible, h::Argument>(); //p::implicitly_convertible, h::ExternFuncArgument>(); @@ -408,43 +392,33 @@ void defineParam_impl(const std::string suffix, const h::Type type) return; } - -template -p::object create_param_object(Args...args) -{ +template +p::object create_param_object(Args... args) { typedef h::Param ParamType; typedef typename p::manage_new_object::apply::type converter_t; converter_t converter; - PyObject* obj = converter( new ParamType(args...) ); - return p::object( p::handle<>( obj ) ); + PyObject *obj = converter(new ParamType(args...)); + return p::object(p::handle<>(obj)); } -struct end_of_recursion_t {}; // dummy helper type +struct end_of_recursion_t {}; // dummy helper type // C++ fun, variadic template recursive function ! -template -p::object create_param0_impl(h::Type type, std::string name) -{ - if(h::type_of() == type) - { - if(name != "") - { +template +p::object create_param0_impl(h::Type type, std::string name) { + if (h::type_of() == type) { + if (name != "") { return create_param_object(name); - } - else - { + } else { return create_param_object(); } - } - else - { - return create_param0_impl(type, name); // keep recursing + } else { + return create_param0_impl(type, name); // keep recursing } } -template<> -p::object create_param0_impl(h::Type type, std::string /*name*/) -{ // end of recursion, did not find a matching type +template <> +p::object create_param0_impl(h::Type type, std::string /*name*/) { // end of recursion, did not find a matching type printf("create_param0_impl received %s\n", type_repr(type).c_str()); throw std::invalid_argument("ParamFactory::create_param0_impl received type not handled"); return p::object(); @@ -457,7 +431,6 @@ p::object create_param0_impl(h::Type type, std::string /*nam // } //}; - //// C++ fun, variadic template recursive function ! //template //p::object create_param1_impl(h::Type type, std::string name, p::object val, h::Expr min, h::Expr max) @@ -516,64 +489,53 @@ p::object create_param0_impl(h::Type type, std::string /*nam //} typedef boost::mpl::list pixel_types_t; + boost::int8_t, boost::int16_t, boost::int32_t, + float, double> + pixel_types_t; // C++ fun, variadic template recursive function ! // (if you wonder why struct::operator() and not a function, // see http://artofsoftware.org/2012/12/20/c-template-function-partial-specialization ) -template -struct create_param1_impl_t -{ - p::object operator()(h::Type type, p::object val, Args... args) - { +template +struct create_param1_impl_t { + p::object operator()(h::Type type, p::object val, Args... 
args) { typedef typename boost::mpl::front::type pixel_t; - if(h::type_of() == type) - { + if (h::type_of() == type) { p::extract val_extract(val); - if(val_extract.check()) - { + if (val_extract.check()) { pixel_t true_val = val_extract(); return call_create_param_object(true_val, args...); - } - else - { + } else { printf("create_param1_impl type == %s\n", type_repr(type).c_str()); const std::string val_str = p::extract(p::str(val)); printf("create_param1_impl val == %s\n", val_str.c_str()); throw std::invalid_argument("ParamFactory::create_param1_impl called with " "a value that could not be converted to the given type"); } - } - else - { // keep recursing + } else { // keep recursing typedef typename boost::mpl::pop_front::type pixels_types_tail_t; return create_param1_impl_t()(type, val, args...); } } - template - p::object call_create_param_object(T true_val) - { + template + p::object call_create_param_object(T true_val) { return create_param_object(true_val); } - template - p::object call_create_param_object(T true_val, std::string name) - { + template + p::object call_create_param_object(T true_val, std::string name) { return create_param_object(name, true_val); } - template - p::object call_create_param_object(T true_val, std::string name, h::Expr min, h::Expr max) - { + template + p::object call_create_param_object(T true_val, std::string name, h::Expr min, h::Expr max) { return create_param_object(name, true_val, min, max); } - template - p::object call_create_param_object(T true_val, h::Expr min, h::Expr max) - { + template + p::object call_create_param_object(T true_val, h::Expr min, h::Expr max) { return create_param_object(true_val, min, max); } @@ -583,14 +545,11 @@ struct create_param1_impl_t // throw std::runtime_error("create_param1_impl_t was called with parameters types not yet handled"); // return p::object(); // } - }; -template -struct create_param1_impl_t -{ - p::object operator()(h::Type type, p::object val, Args... args) - { +template +struct create_param1_impl_t { + p::object operator()(h::Type type, p::object val, Args... 
args) { // end of recursion, did not find a matching type printf("create_param1_impl received %s\n", type_repr(type).c_str()); throw std::invalid_argument("ParamFactory::create_param1_impl received type not handled"); @@ -598,48 +557,39 @@ struct create_param1_impl_t } }; - -struct ParamFactory -{ - static p::object create_param0(h::Type type) - { +struct ParamFactory { + static p::object create_param0(h::Type type) { return create_param0_impl< - boost::uint8_t, boost::uint16_t, boost::uint32_t, - boost::int8_t, boost::int16_t, boost::int32_t, - float, double>(type, ""); + boost::uint8_t, boost::uint16_t, boost::uint32_t, + boost::int8_t, boost::int16_t, boost::int32_t, + float, double>(type, ""); } - static p::object create_param1(h::Type type, std::string name) - { + static p::object create_param1(h::Type type, std::string name) { return create_param0_impl< - boost::uint8_t, boost::uint16_t, boost::uint32_t, - boost::int8_t, boost::int16_t, boost::int32_t, - float, double>(type, name); + boost::uint8_t, boost::uint16_t, boost::uint32_t, + boost::int8_t, boost::int16_t, boost::int32_t, + float, double>(type, name); } - static p::object create_param2(h::Type type, p::object val) - { + static p::object create_param2(h::Type type, p::object val) { return create_param1_impl_t()(type, val); } - static p::object create_param3(h::Type type, std::string name, p::object val) - { + static p::object create_param3(h::Type type, std::string name, p::object val) { return create_param1_impl_t()(type, val, name); } - static p::object create_param4(h::Type type, p::object val, h::Expr min, h::Expr max) - { + static p::object create_param4(h::Type type, p::object val, h::Expr min, h::Expr max) { return create_param1_impl_t()(type, val, min, max); } - static p::object create_param5(h::Type type, std::string name, p::object val, h::Expr min, h::Expr max) - { + static p::object create_param5(h::Type type, std::string name, p::object val, h::Expr min, h::Expr max) { return create_param1_impl_t()(type, val, name, min, max); } }; -void defineParam() -{ +void defineParam() { // Might create linking problems, if Param.cpp is not included in the python library defineParam_impl("_uint8", h::UInt(8)); @@ -653,7 +603,6 @@ void defineParam() defineParam_impl("_float32", h::Float(32)); defineParam_impl("_float64", h::Float(64)); - // "Param" will look as a class, but instead it will be simply a factory method // Order of definitions matter, the last defined method is attempted first // Here it is important to try "type, name" before "type, val" @@ -676,13 +625,11 @@ void defineParam() ; - p::def("user_context_value", &h::user_context_value, "Returns an Expr corresponding to the user context passed to " "the function (if any). It is rare that this function is necessary " "(e.g. 
to pass the user context to an extern function written in C)."); - defineImageParam(); defineOutputImageParam(); return; diff --git a/python_bindings/python/Param.h b/python_bindings/python/Param.h index 66767fc2c26b..b3081588c7bb 100644 --- a/python_bindings/python/Param.h +++ b/python_bindings/python/Param.h @@ -3,4 +3,4 @@ void defineParam(); -#endif // PARAM_H +#endif // PARAM_H diff --git a/python_bindings/python/RDom.cpp b/python_bindings/python/RDom.cpp index c2e790c0963e..add1290276ad 100644 --- a/python_bindings/python/RDom.cpp +++ b/python_bindings/python/RDom.cpp @@ -1,82 +1,72 @@ #include "RDom.h" // to avoid compiler confusion, python.hpp must be include before Halide headers -#include #include "add_operators.h" +#include -#include "../../src/RDom.h" +#include "../../src/IROperator.h" // for operations with RVar #include "../../src/ImageParam.h" -#include "../../src/IROperator.h" // for operations with RVar +#include "../../src/RDom.h" #include namespace h = Halide; namespace p = boost::python; -void defineRVar() -{ +void defineRVar() { using Halide::RVar; auto rvar_class = p::class_("RVar", - "A reduction variable represents a single dimension of a reduction " - "domain (RDom). Don't construct them directly, instead construct an " - "RDom, and use RDom::operator[] to get at the variables. For " - "single-dimensional reduction domains, you can just cast a " - "single-dimensional RDom to an RVar.", - p::init<>(p::args("self"), "An empty reduction variable.") - ) - .def(p::init(p::args("self", "name"), "Construct an RVar with the given name")) - - .def(p::init( - p::args("self", "domain", "index"), - "Construct a reduction variable with the given name and " - "bounds. Must be a member of the given reduction domain.")) - - .def("min", &RVar::min, p::arg("self"), - "The minimum value that this variable will take on") - .def("extent", &RVar::extent, p::arg("self"), - "The number that this variable will take on. " - "The maximum value of this variable will be min() + extent() - 1") - - .def("domain", &RVar::domain, p::arg("self"), - "The reduction domain this is associated with.") - - .def("name", &RVar::name, p::arg("self"), - p::return_value_policy(), - "The name of this reduction variable"); + "A reduction variable represents a single dimension of a reduction " + "domain (RDom). Don't construct them directly, instead construct an " + "RDom, and use RDom::operator[] to get at the variables. For " + "single-dimensional reduction domains, you can just cast a " + "single-dimensional RDom to an RVar.", + p::init<>(p::args("self"), "An empty reduction variable.")) + .def(p::init(p::args("self", "name"), "Construct an RVar with the given name")) + + .def(p::init( + p::args("self", "domain", "index"), + "Construct a reduction variable with the given name and " + "bounds. Must be a member of the given reduction domain.")) + + .def("min", &RVar::min, p::arg("self"), + "The minimum value that this variable will take on") + .def("extent", &RVar::extent, p::arg("self"), + "The number that this variable will take on. 
" + "The maximum value of this variable will be min() + extent() - 1") + + .def("domain", &RVar::domain, p::arg("self"), + "The reduction domain this is associated with.") + + .def("name", &RVar::name, p::arg("self"), + p::return_value_policy(), + "The name of this reduction variable"); p::implicitly_convertible(); - add_operators(rvar_class); // define operators with int, rvars, and exprs + add_operators(rvar_class); // define operators with int, rvars, and exprs add_operators_with(rvar_class); return; } - -h::RDom *RDom_constructor0(p::tuple args, std::string name="") -{ +h::RDom *RDom_constructor0(p::tuple args, std::string name = "") { const size_t args_len = p::len(args); - if((args_len % 2) != 0) - { + if ((args_len % 2) != 0) { throw std::invalid_argument("RDom constructor expects an even number of Expr inputs"); } std::vector exprs; - for(size_t i=0; i < args_len; i += 1) - { + for (size_t i = 0; i < args_len; i += 1) { p::object o = args[i]; p::extract expr_extract(o); - if(expr_extract.check()) - { + if (expr_extract.check()) { exprs.push_back(expr_extract()); - } - else - { - for(size_t j=0; j < args_len; j+=1) - { + } else { + for (size_t j = 0; j < args_len; j += 1) { p::object o = args[j]; const std::string o_str = p::extract(p::str(o)); printf("RDom constructor args_passed[%lu] == %s\n", j, o_str.c_str()); @@ -87,40 +77,33 @@ h::RDom *RDom_constructor0(p::tuple args, std::string name="") assert((exprs.size() % 2) == 0); std::vector> ranges; - for(size_t i=0; i < exprs.size(); i += 2) - { - ranges.push_back(std::make_pair(exprs[i], exprs[i+1])); + for (size_t i = 0; i < exprs.size(); i += 2) { + ranges.push_back(std::make_pair(exprs[i], exprs[i + 1])); } return new h::RDom(ranges, name); } - h::RDom *RDom_constructor1(h::Expr min0, h::Expr extent0, - std::string name="") -{ + std::string name = "") { std::vector> ranges; ranges.push_back(std::make_pair(min0, extent0)); return new h::RDom(ranges, name); } - h::RDom *RDom_constructor2(h::Expr min0, h::Expr extent0, h::Expr min1, h::Expr extent1, - std::string name="") -{ + std::string name = "") { std::vector> ranges; ranges.push_back(std::make_pair(min0, extent0)); ranges.push_back(std::make_pair(min1, extent1)); return new h::RDom(ranges, name); } - h::RDom *RDom_constructor3(h::Expr min0, h::Expr extent0, h::Expr min1, h::Expr extent1, h::Expr min2, h::Expr extent2, - std::string name="") -{ + std::string name = "") { std::vector> ranges; ranges.push_back(std::make_pair(min0, extent0)); ranges.push_back(std::make_pair(min1, extent1)); @@ -132,8 +115,7 @@ h::RDom *RDom_constructor4(h::Expr min0, h::Expr extent0, h::Expr min1, h::Expr extent1, h::Expr min2, h::Expr extent2, h::Expr min3, h::Expr extent3, - std::string name="") -{ + std::string name = "") { std::vector> ranges; ranges.push_back(std::make_pair(min0, extent0)); ranges.push_back(std::make_pair(min1, extent1)); @@ -142,9 +124,7 @@ h::RDom *RDom_constructor4(h::Expr min0, h::Expr extent0, return new h::RDom(ranges, name); } - -void defineRDom() -{ +void defineRDom() { using Halide::RDom; defineRVar(); @@ -153,102 +133,103 @@ void defineRDom() p::class_("_ReductionDomain", p::no_init); auto rdom_class = p::class_("RDom", - "A multi-dimensional domain over which to iterate. 
" - "Used when defining functions with update definitions.\n" - "See apps/bilateral_grid.py for an example of a reduction.\n\n" - "Constructors::\n\n" - " RDom(Expr min, Expr extent, name="") -- 1D reduction\n" - " RDom(Expr min0, Expr extent0, Expr min1, Expr extent1, name="") -- 2D reduction\n" - " (Similar for 3D and 4D reductions)\n" - " RDom(Buffer|ImageParam) -- Iterate over all points in the domain\n" - "The following global functions can be used for inline reductions::\n\n" - " minimum, maximum, product, sum", - p::init<>(p::arg("self"), "Construct an undefined reduction domain.")) - .def(p::init>(p::args("self", "buffer"), - "Construct a reduction domain that iterates over all points in " - "a given Buffer, Image, or ImageParam. " - "Has the same dimensionality as the argument.")) - .def(p::init(p::args("self", "image_param"), - "Construct a reduction domain that iterates over all points in " - "a given Buffer, Image, or ImageParam. " - "Has the same dimensionality as the argument.")) - .def(p::init( - p::args("self", "domain"), - "Construct a reduction domain that wraps an Internal ReductionDomain object.")) - .def("__init__", - p::make_constructor(&RDom_constructor0, p::default_call_policies(), - (p::arg("ranges"), p::arg("name")="")), - "Construct a multi-dimensional reduction domain with the given name. " - "If the name is left blank, a unique one is auto-generated.") - .def("__init__", - p::make_constructor(&RDom_constructor1, p::default_call_policies(), - (p::args("min0", "extent0"), - p::arg("name")="")), - "Construct a multi-dimensional reduction domain with the given name. " - "If the name is left blank, a unique one is auto-generated.") - .def("__init__", - p::make_constructor(&RDom_constructor2, p::default_call_policies(), - (p::args("min0", "extent0", "min1", "extent1"), - p::arg("name")="")), - "Construct a multi-dimensional reduction domain with the given name. " - "If the name is left blank, a unique one is auto-generated.") - .def("__init__", - p::make_constructor(&RDom_constructor3, p::default_call_policies(), - (p::args("min0", "extent0", "min1", "extent1", - "min2", "extent2"), - p::arg("name")="")), - "Construct a multi-dimensional reduction domain with the given name. " - "If the name is left blank, a unique one is auto-generated.") - .def("__init__", - p::make_constructor(&RDom_constructor4, p::default_call_policies(), - (p::args("min0", "extent0", "min1", "extent1", - "min2", "extent2"), - p::arg("name")="")), - "Construct a multi-dimensional reduction domain with the given name. " - "If the name is left blank, a unique one is auto-generated.") - - .def("domain", &RDom::domain, p::arg("self"), - "Get at the internal reduction domain object that this wraps.") - .def("defined", &RDom::defined, p::arg("self"), - "Check if this reduction domain is non-NULL") - .def("same_as", &RDom::same_as, p::args("self", "other"), - "Compare two reduction domains for equality of reference") - .def("dimensions", &RDom::dimensions, p::arg("self"), - "Get the dimensionality of a reduction domain") - .def("where", &RDom::where, p::args("self", "predicate"), - "Add a predicate to the RDom. An RDom may have multiple" - "predicates associated with it. An update definition that uses" - "an RDom only iterates over the subset points in the domain for" - "which all of its predicates are true. The predicate expression" - "obeys the same rules as the expressions used on the" - "right-hand-side of the corresponding update definition. 
It may" - "refer to the RDom's variables and free variables in the Func's" - "update definition. It may include calls to other Funcs, or make" - "recursive calls to the same Func. This permits iteration over" - "non-rectangular domains, or domains with sizes that vary with" - "some free variable, or domains with shapes determined by some" - "other Func. ") - //"Get at one of the dimensions of the reduction domain" - //EXPORT RVar operator[](int) const; - - //"Single-dimensional reduction domains can be used as RVars directly." - //EXPORT operator RVar() const; - - //"Single-dimensional reduction domains can be also be used as Exprs directly." - //EXPORT operator Expr() const; - - .def_readonly("x", &RDom::x, - "Direct access to the first four dimensions of the reduction domain. " - "Some of these variables may be undefined if the reduction domain has fewer than four dimensions.") - .def_readonly("y", &RDom::y) - .def_readonly("z", &RDom::z) - .def_readonly("w", &RDom::w) - ; + "A multi-dimensional domain over which to iterate. " + "Used when defining functions with update definitions.\n" + "See apps/bilateral_grid.py for an example of a reduction.\n\n" + "Constructors::\n\n" + " RDom(Expr min, Expr extent, name=" + ") -- 1D reduction\n" + " RDom(Expr min0, Expr extent0, Expr min1, Expr extent1, name=" + ") -- 2D reduction\n" + " (Similar for 3D and 4D reductions)\n" + " RDom(Buffer|ImageParam) -- Iterate over all points in the domain\n" + "The following global functions can be used for inline reductions::\n\n" + " minimum, maximum, product, sum", + p::init<>(p::arg("self"), "Construct an undefined reduction domain.")) + .def(p::init>(p::args("self", "buffer"), + "Construct a reduction domain that iterates over all points in " + "a given Buffer, Image, or ImageParam. " + "Has the same dimensionality as the argument.")) + .def(p::init(p::args("self", "image_param"), + "Construct a reduction domain that iterates over all points in " + "a given Buffer, Image, or ImageParam. " + "Has the same dimensionality as the argument.")) + .def(p::init( + p::args("self", "domain"), + "Construct a reduction domain that wraps an Internal ReductionDomain object.")) + .def("__init__", + p::make_constructor(&RDom_constructor0, p::default_call_policies(), + (p::arg("ranges"), p::arg("name") = "")), + "Construct a multi-dimensional reduction domain with the given name. " + "If the name is left blank, a unique one is auto-generated.") + .def("__init__", + p::make_constructor(&RDom_constructor1, p::default_call_policies(), + (p::args("min0", "extent0"), + p::arg("name") = "")), + "Construct a multi-dimensional reduction domain with the given name. " + "If the name is left blank, a unique one is auto-generated.") + .def("__init__", + p::make_constructor(&RDom_constructor2, p::default_call_policies(), + (p::args("min0", "extent0", "min1", "extent1"), + p::arg("name") = "")), + "Construct a multi-dimensional reduction domain with the given name. " + "If the name is left blank, a unique one is auto-generated.") + .def("__init__", + p::make_constructor(&RDom_constructor3, p::default_call_policies(), + (p::args("min0", "extent0", "min1", "extent1", + "min2", "extent2"), + p::arg("name") = "")), + "Construct a multi-dimensional reduction domain with the given name. 
" + "If the name is left blank, a unique one is auto-generated.") + .def("__init__", + p::make_constructor(&RDom_constructor4, p::default_call_policies(), + (p::args("min0", "extent0", "min1", "extent1", + "min2", "extent2"), + p::arg("name") = "")), + "Construct a multi-dimensional reduction domain with the given name. " + "If the name is left blank, a unique one is auto-generated.") + + .def("domain", &RDom::domain, p::arg("self"), + "Get at the internal reduction domain object that this wraps.") + .def("defined", &RDom::defined, p::arg("self"), + "Check if this reduction domain is non-NULL") + .def("same_as", &RDom::same_as, p::args("self", "other"), + "Compare two reduction domains for equality of reference") + .def("dimensions", &RDom::dimensions, p::arg("self"), + "Get the dimensionality of a reduction domain") + .def("where", &RDom::where, p::args("self", "predicate"), + "Add a predicate to the RDom. An RDom may have multiple" + "predicates associated with it. An update definition that uses" + "an RDom only iterates over the subset points in the domain for" + "which all of its predicates are true. The predicate expression" + "obeys the same rules as the expressions used on the" + "right-hand-side of the corresponding update definition. It may" + "refer to the RDom's variables and free variables in the Func's" + "update definition. It may include calls to other Funcs, or make" + "recursive calls to the same Func. This permits iteration over" + "non-rectangular domains, or domains with sizes that vary with" + "some free variable, or domains with shapes determined by some" + "other Func. ") + //"Get at one of the dimensions of the reduction domain" + //EXPORT RVar operator[](int) const; + + //"Single-dimensional reduction domains can be used as RVars directly." + //EXPORT operator RVar() const; + + //"Single-dimensional reduction domains can be also be used as Exprs directly." + //EXPORT operator Expr() const; + + .def_readonly("x", &RDom::x, + "Direct access to the first four dimensions of the reduction domain. 
" + "Some of these variables may be undefined if the reduction domain has fewer than four dimensions.") + .def_readonly("y", &RDom::y) + .def_readonly("z", &RDom::z) + .def_readonly("w", &RDom::w); p::implicitly_convertible(); p::implicitly_convertible(); - add_operators(rdom_class); // define operators with int, rdom and exprs + add_operators(rdom_class); // define operators with int, rdom and exprs add_operators_with(rdom_class); return; diff --git a/python_bindings/python/RDom.h b/python_bindings/python/RDom.h index 99d42be0eaf1..4aaf9213ddf8 100644 --- a/python_bindings/python/RDom.h +++ b/python_bindings/python/RDom.h @@ -3,4 +3,4 @@ void defineRDom(); -#endif // RDOM_H +#endif // RDOM_H diff --git a/python_bindings/python/Target.cpp b/python_bindings/python/Target.cpp index 15c721c9aa91..fbab6a38ff3a 100644 --- a/python_bindings/python/Target.cpp +++ b/python_bindings/python/Target.cpp @@ -7,8 +7,8 @@ #include "Expr.h" -#include #include +#include namespace h = Halide; namespace p = boost::python; @@ -22,9 +22,9 @@ void defineTarget() { using Halide::Target; auto target_class = - p::class_("Target", - "A struct representing a target machine and os to generate code for.", - p::init<>()) + p::class_("Target", + "A struct representing a target machine and os to generate code for.", + p::init<>()) // not all constructors (yet) exposed //Target(OS o, Arch a, int b, std::vector initial_features = std::vector()) @@ -53,79 +53,76 @@ void defineTarget() { "that can't be parsed, which is intentional).") .def("set_feature", &Target::set_feature, - (p::arg("self"), p::arg("f"), p::arg("value")=true)) + (p::arg("self"), p::arg("f"), p::arg("value") = true)) .def("set_features", &target_set_features, - (p::arg("self"), p::arg("features_to_set"), p::arg("value")=true)) + (p::arg("self"), p::arg("features_to_set"), p::arg("value") = true)) - // not all methods (yet) exposed + // not all methods (yet) exposed - ; + ; p::enum_("TargetOS", "The operating system used by the target. Determines which " "system calls to generate.") - .value("OSUnknown", Target::OS::OSUnknown) - .value("Linux", Target::OS::Linux) - .value("Windows", Target::OS::Windows) - .value("OSX", Target::OS::OSX) - .value("Android", Target::OS::Android) - .value("IOS", Target::OS::IOS) - .value("NaCl", Target::OS::NaCl) - .export_values() - ; + .value("OSUnknown", Target::OS::OSUnknown) + .value("Linux", Target::OS::Linux) + .value("Windows", Target::OS::Windows) + .value("OSX", Target::OS::OSX) + .value("Android", Target::OS::Android) + .value("IOS", Target::OS::IOS) + .value("NaCl", Target::OS::NaCl) + .export_values(); p::enum_("TargetArch", "The architecture used by the target. Determines the " "instruction set to use. 
For the PNaCl target, the \"instruction " " set\" is actually llvm bitcode.") - .value("ArchUnknown", Target::Arch::ArchUnknown) - .value("X86", Target::Arch::X86) - .value("ARM", Target::Arch::ARM) - .value("PNaCl", Target::Arch::PNaCl) - .value("MIPS", Target::Arch::MIPS) - .value("POWERPC", Target::Arch::POWERPC) - .export_values() - ; + .value("ArchUnknown", Target::Arch::ArchUnknown) + .value("X86", Target::Arch::X86) + .value("ARM", Target::Arch::ARM) + .value("PNaCl", Target::Arch::PNaCl) + .value("MIPS", Target::Arch::MIPS) + .value("POWERPC", Target::Arch::POWERPC) + .export_values(); p::enum_("TargetFeature", "Optional features a target can have.") - .value("JIT", Target::Feature::JIT) - .value("Debug", Target::Feature::Debug) - .value("NoAsserts", Target::Feature::NoAsserts) - .value("NoBoundsQuery", Target::Feature::NoBoundsQuery) - .value("Profile", Target::Feature::Profile) - - .value("SSE41", Target::Feature::SSE41) - .value("AVX", Target::Feature::AVX) - .value("AVX2", Target::Feature::AVX2) - .value("FMA", Target::Feature::FMA) - .value("FMA4", Target::Feature::FMA4) - .value("F16C", Target::Feature::F16C) - - .value("ARMv7s", Target::Feature::ARMv7s) - .value("NoNEON", Target::Feature::NoNEON) - - .value("VSX", Target::Feature::VSX) - .value("POWER_ARCH_2_07", Target::Feature::POWER_ARCH_2_07) - - .value("CUDA", Target::Feature::CUDA) - .value("CUDACapability30", Target::Feature::CUDACapability30) - .value("CUDACapability32", Target::Feature::CUDACapability32) - .value("CUDACapability35", Target::Feature::CUDACapability35) - .value("CUDACapability50", Target::Feature::CUDACapability50) - - .value("OpenCL", Target::Feature::OpenCL) - .value("CLDoubles", Target::Feature::CLDoubles) - - .value("OpenGL", Target::Feature::OpenGL) - .value("Renderscript", Target::Feature::Renderscript) - .value("UserContext", Target::Feature::UserContext) - .value("Matlab", Target::Feature::Matlab) - .value("Metal", Target::Feature::Metal) - .value("FeatureEnd", Target::Feature::FeatureEnd) - - .export_values() - ; + .value("JIT", Target::Feature::JIT) + .value("Debug", Target::Feature::Debug) + .value("NoAsserts", Target::Feature::NoAsserts) + .value("NoBoundsQuery", Target::Feature::NoBoundsQuery) + .value("Profile", Target::Feature::Profile) + + .value("SSE41", Target::Feature::SSE41) + .value("AVX", Target::Feature::AVX) + .value("AVX2", Target::Feature::AVX2) + .value("FMA", Target::Feature::FMA) + .value("FMA4", Target::Feature::FMA4) + .value("F16C", Target::Feature::F16C) + + .value("ARMv7s", Target::Feature::ARMv7s) + .value("NoNEON", Target::Feature::NoNEON) + + .value("VSX", Target::Feature::VSX) + .value("POWER_ARCH_2_07", Target::Feature::POWER_ARCH_2_07) + + .value("CUDA", Target::Feature::CUDA) + .value("CUDACapability30", Target::Feature::CUDACapability30) + .value("CUDACapability32", Target::Feature::CUDACapability32) + .value("CUDACapability35", Target::Feature::CUDACapability35) + .value("CUDACapability50", Target::Feature::CUDACapability50) + + .value("OpenCL", Target::Feature::OpenCL) + .value("CLDoubles", Target::Feature::CLDoubles) + + .value("OpenGL", Target::Feature::OpenGL) + .value("Renderscript", Target::Feature::Renderscript) + .value("UserContext", Target::Feature::UserContext) + .value("Matlab", Target::Feature::Matlab) + .value("Metal", Target::Feature::Metal) + .value("FeatureEnd", Target::Feature::FeatureEnd) + + .export_values(); p::def("get_host_target", &h::get_host_target, "Return the target corresponding to the host machine."); diff --git 
a/python_bindings/python/Target.h b/python_bindings/python/Target.h index b9229c100f79..3378f362666a 100644 --- a/python_bindings/python/Target.h +++ b/python_bindings/python/Target.h @@ -3,4 +3,4 @@ void defineTarget(); -#endif // TARGET_H +#endif // TARGET_H diff --git a/python_bindings/python/Type.cpp b/python_bindings/python/Type.cpp index 502d733e7e38..61489c85b72a 100644 --- a/python_bindings/python/Type.cpp +++ b/python_bindings/python/Type.cpp @@ -4,20 +4,17 @@ #include #include -#include "../../src/Type.h" #include "../../src/Expr.h" +#include "../../src/Type.h" -#include #include +#include namespace h = Halide; - -std::string type_code_to_string(const h::Type &t) -{ +std::string type_code_to_string(const h::Type &t) { std::string code_string = "unknown"; - switch(t.code()) - { + switch (t.code()) { case h::Type::UInt: code_string = "UInt"; break; @@ -45,16 +42,13 @@ Halide::Type make_handle(int lanes) { return Halide::Handle(lanes, nullptr); } -std::string type_repr(const h::Type &t) -{ +std::string type_repr(const h::Type &t) { auto message_format = boost::format(""); - return boost::str(message_format % type_code_to_string(t) % t.bits() % t.lanes()); } -void defineType() -{ +void defineType() { using Halide::Type; namespace p = boost::python; @@ -64,65 +58,64 @@ void defineType() p::class_("Type", "Default constructor initializes everything to predictable-but-unlikely values", p::no_init) - .def(p::init(p::args("code", "bits", "lanes"))) - .def(p::init(p::args("that"), "Copy constructor")) - - .def("bits", &Type::bits, - "The number of bits of precision of a single scalar value of this type.") - .def("bytes", &Type::bytes, - "The number of bytes required to store a single scalar value of this type. Ignores vector lanes.") - .def("lanes", &Type::lanes, - "How many elements (if a vector type). Should be 1 for scalar types.") - .def("is_bool", &Type::is_bool, p::arg("self"), - "Is this type boolean (represented as UInt(1))?") - .def("is_vector", &Type::is_vector, p::arg("self"), - "Is this type a vector type? (lanes > 1)") - .def("is_scalar", &Type::is_scalar, p::arg("self"), - "Is this type a scalar type? 
(lanes == 1)") - .def("is_float", &Type::is_float, p::arg("self"), - "Is this type a floating point type (float or double).") - .def("is_int", &Type::is_int, p::arg("self"), - "Is this type a signed integer type?") - .def("is_uint", &Type::is_uint, p::arg("self"), - "Is this type an unsigned integer type?") - .def("is_handle", &Type::is_handle, p::arg("self"), - "Is this type an opaque handle type (void *)") - .def(p::self == p::self) - .def(p::self != p::self) - .def("with_lanes", &Type::with_lanes, p::args("self", "w"), - "Produce a copy of this type, with 'lanes' vector lanes") - .def("with_bits", &Type::with_bits, p::args("self", "w"), - "Produce a copy of this type, with 'bits' bits") - .def("element_of", &Type::element_of, p::arg("self"), - "Produce the type of a single element of this vector type") - .def("can_represent", can_represent_other_type, p::arg("other"), - "Can this type represent all values of another type?") - .def("max", &Type::max, p::arg("self"), - "Return an expression which is the maximum value of this type") - .def("min", &Type::min, p::arg("self"), - "Return an expression which is the minimum value of this type") - .def("__repr__", &type_repr, p::arg("self"), - "Return a string containing a printable representation of a Type object.") - ; + .def(p::init(p::args("code", "bits", "lanes"))) + .def(p::init(p::args("that"), "Copy constructor")) + + .def("bits", &Type::bits, + "The number of bits of precision of a single scalar value of this type.") + .def("bytes", &Type::bytes, + "The number of bytes required to store a single scalar value of this type. Ignores vector lanes.") + .def("lanes", &Type::lanes, + "How many elements (if a vector type). Should be 1 for scalar types.") + .def("is_bool", &Type::is_bool, p::arg("self"), + "Is this type boolean (represented as UInt(1))?") + .def("is_vector", &Type::is_vector, p::arg("self"), + "Is this type a vector type? (lanes > 1)") + .def("is_scalar", &Type::is_scalar, p::arg("self"), + "Is this type a scalar type? 
(lanes == 1)") + .def("is_float", &Type::is_float, p::arg("self"), + "Is this type a floating point type (float or double).") + .def("is_int", &Type::is_int, p::arg("self"), + "Is this type a signed integer type?") + .def("is_uint", &Type::is_uint, p::arg("self"), + "Is this type an unsigned integer type?") + .def("is_handle", &Type::is_handle, p::arg("self"), + "Is this type an opaque handle type (void *)") + .def(p::self == p::self) + .def(p::self != p::self) + .def("with_lanes", &Type::with_lanes, p::args("self", "w"), + "Produce a copy of this type, with 'lanes' vector lanes") + .def("with_bits", &Type::with_bits, p::args("self", "w"), + "Produce a copy of this type, with 'bits' bits") + .def("element_of", &Type::element_of, p::arg("self"), + "Produce the type of a single element of this vector type") + .def("can_represent", can_represent_other_type, p::arg("other"), + "Can this type represent all values of another type?") + .def("max", &Type::max, p::arg("self"), + "Return an expression which is the maximum value of this type") + .def("min", &Type::min, p::arg("self"), + "Return an expression which is the minimum value of this type") + .def("__repr__", &type_repr, p::arg("self"), + "Return a string containing a printable representation of a Type object."); p::def("Int", h::Int, - (p::arg("bits"), p::arg("lanes")=1), + (p::arg("bits"), p::arg("lanes") = 1), "Constructing an signed integer type"); p::def("UInt", h::UInt, - (p::arg("bits"), p::arg("lanes")=1), + (p::arg("bits"), p::arg("lanes") = 1), "Constructing an unsigned integer type"); p::def("Float", h::Float, - (p::arg("bits"), p::arg("lanes")=1), + (p::arg("bits"), p::arg("lanes") = 1), "Constructing a floating-point type"); p::def("Bool", h::Bool, - (p::arg("lanes")=1), + (p::arg("lanes") = 1), "Construct a boolean type"); p::def("Handle", make_handle, - (p::arg("lanes")=1), + (p::arg("lanes") = 1), "Construct a handle type"); return; diff --git a/python_bindings/python/Type.h b/python_bindings/python/Type.h index b175b335e302..42fb6f3706d2 100644 --- a/python_bindings/python/Type.h +++ b/python_bindings/python/Type.h @@ -4,13 +4,12 @@ #include namespace Halide { -struct Type; // forward declaration +struct Type; // forward declaration } void defineType(); - -std::string type_repr(const Halide::Type &t); // helper function +std::string type_repr(const Halide::Type &t); // helper function std::string type_code_to_string(const Halide::Type &t); -#endif // TYPE_H +#endif // TYPE_H diff --git a/python_bindings/python/Var.cpp b/python_bindings/python/Var.cpp index 47c07b0dadc9..13b58a6f93b9 100644 --- a/python_bindings/python/Var.cpp +++ b/python_bindings/python/Var.cpp @@ -1,134 +1,123 @@ #include "Var.h" // to avoid compiler confusion, python.hpp must be include before Halide headers -#include #include "add_operators.h" +#include -#include "../../src/Var.h" #include "../../src/IROperator.h" +#include "../../src/Var.h" #include #include namespace h = Halide; -bool var_is_implicit0(h::Var &that) -{ +bool var_is_implicit0(h::Var &that) { return that.is_implicit(); } -bool var_is_implicit1(const std::string name) -{ +bool var_is_implicit1(const std::string name) { return h::Var::is_implicit(name); } - -int var_implicit_index0(h::Var &that) -{ +int var_implicit_index0(h::Var &that) { return that.is_implicit(); } -int var_implicit_index1(const std::string name) -{ +int var_implicit_index1(const std::string name) { return h::Var::is_implicit(name); } - -bool var_is_placeholder0(h::Var &that) -{ +bool var_is_placeholder0(h::Var &that) { 
return that.is_placeholder(); } -bool var_is_placeholder1(const std::string name) -{ +bool var_is_placeholder1(const std::string name) { return h::Var::is_placeholder(name); } - -h::Expr var_as_expr(h::Var &that) -{ +h::Expr var_as_expr(h::Var &that) { return static_cast(that); } - -std::string var_repr(const h::Var &var) -{ +std::string var_repr(const h::Var &var) { std::string repr; boost::format f(""); repr = boost::str(f % var.name()); return repr; } - -void defineVar() -{ +void defineVar() { using Halide::Var; - namespace p = boost::python; auto var_class = p::class_("Var", - "A Halide variable, to be used when defining functions. It is just" \ - "a name, and can be reused in places where no name conflict will" \ - "occur. It can be used in the left-hand-side of a function" \ - "definition, or as an Expr. As an Expr, it always has type Int(32).\n" \ - "\n" \ - "Constructors::\n" \ - "Var() -- Construct Var with an automatically-generated unique name\n" \ + "A Halide variable, to be used when defining functions. It is just" + "a name, and can be reused in places where no name conflict will" + "occur. It can be used in the left-hand-side of a function" + "definition, or as an Expr. As an Expr, it always has type Int(32).\n" + "\n" + "Constructors::\n" + "Var() -- Construct Var with an automatically-generated unique name\n" "Var(name) -- Construct Var with the given string name.\n", p::init(p::args("self", "name"))) - .def(p::init<>(p::arg("self"))) - //.add_property("name", &Var::name) // "Get the name of a Var.") - .def("name", &Var::name, p::arg("self"), - p::return_value_policy(), - "Get the name of a Var.") - .def("same_as", &Var::same_as, p::args("self", "other"), "Test if two Vars are the same.") - .def("__eq__", &Var::same_as, p::args("self", "other"), "Test if two Vars are the same.") - //.def(self == p::other()) - - .def("implicit", &Var::implicit, p::arg("n"), - "Implicit var constructor. Implicit variables are injected " - "automatically into a function call if the number of arguments " - "to the function are fewer than its dimensionality and a " - "placeholder (\"_\") appears in its argument list. Defining a " - "function to equal an expression containing implicit variables " - "similarly appends those implicit variables, in the same order, " - "to the left-hand-side of the definition where the placeholder " - "('_') appears.").staticmethod("implicit") - .def("is_implicit", &var_is_implicit0, p::arg("self"), - "Return whether the variable name is of the form for an implicit argument.") - .def("name_is_implicit", &var_is_implicit1, p::arg("name"), - "Return whether a variable name is of the form for an implicit argument.") - .staticmethod("name_is_implicit") - - .def("implicit_index", &var_implicit_index0, p::arg("self"), - "Return the argument index for a placeholder argument given its " - "name. Returns 0 for \\ref _0, 1 for \\ref _1, etc. " - "Returns -1 if the variable is not of implicit form. ") - .def("name_implicit_index", &var_implicit_index1, p::arg("name"), - "Return the argument index for a placeholder argument given its " - "name. Returns 0 for \\ref _0, 1 for \\ref _1, etc. " - "Returns -1 if the variable is not of implicit form. 
") - .staticmethod("name_implicit_index") - - .def("is_placeholder", &var_is_placeholder0, p::arg("self"), - "Test if a var is the placeholder variable \\ref _") - .def("name_is_placeholder", &var_is_placeholder1, p::arg("name"), - "Test if a var is the placeholder variable \\ref _") - .staticmethod("name_is_placeholder") - - .def("expr", &var_as_expr, p::arg("self"), //operator Expr() const - "A Var can be treated as an Expr of type Int(32)") - - .def("gpu_blocks", &Var::gpu_blocks, // no args - "Vars to use for scheduling producer/consumer pairs on the gpu.").staticmethod("gpu_blocks") - .def("gpu_threads", &Var::gpu_threads, // no args - "Vars to use for scheduling producer/consumer pairs on the gpu.").staticmethod("gpu_threads") - - .def("outermost", &Var::outermost, // no args - "A Var that represents the location outside the outermost loop.").staticmethod("outermost") - - .def("__repr__", &var_repr, p::arg("self")); + .def(p::init<>(p::arg("self"))) + //.add_property("name", &Var::name) // "Get the name of a Var.") + .def("name", &Var::name, p::arg("self"), + p::return_value_policy(), + "Get the name of a Var.") + .def("same_as", &Var::same_as, p::args("self", "other"), "Test if two Vars are the same.") + .def("__eq__", &Var::same_as, p::args("self", "other"), "Test if two Vars are the same.") + //.def(self == p::other()) + + .def("implicit", &Var::implicit, p::arg("n"), + "Implicit var constructor. Implicit variables are injected " + "automatically into a function call if the number of arguments " + "to the function are fewer than its dimensionality and a " + "placeholder (\"_\") appears in its argument list. Defining a " + "function to equal an expression containing implicit variables " + "similarly appends those implicit variables, in the same order, " + "to the left-hand-side of the definition where the placeholder " + "('_') appears.") + .staticmethod("implicit") + .def("is_implicit", &var_is_implicit0, p::arg("self"), + "Return whether the variable name is of the form for an implicit argument.") + .def("name_is_implicit", &var_is_implicit1, p::arg("name"), + "Return whether a variable name is of the form for an implicit argument.") + .staticmethod("name_is_implicit") + + .def("implicit_index", &var_implicit_index0, p::arg("self"), + "Return the argument index for a placeholder argument given its " + "name. Returns 0 for \\ref _0, 1 for \\ref _1, etc. " + "Returns -1 if the variable is not of implicit form. ") + .def("name_implicit_index", &var_implicit_index1, p::arg("name"), + "Return the argument index for a placeholder argument given its " + "name. Returns 0 for \\ref _0, 1 for \\ref _1, etc. " + "Returns -1 if the variable is not of implicit form. 
") + .staticmethod("name_implicit_index") + + .def("is_placeholder", &var_is_placeholder0, p::arg("self"), + "Test if a var is the placeholder variable \\ref _") + .def("name_is_placeholder", &var_is_placeholder1, p::arg("name"), + "Test if a var is the placeholder variable \\ref _") + .staticmethod("name_is_placeholder") + + .def("expr", &var_as_expr, p::arg("self"), //operator Expr() const + "A Var can be treated as an Expr of type Int(32)") + + .def("gpu_blocks", &Var::gpu_blocks, // no args + "Vars to use for scheduling producer/consumer pairs on the gpu.") + .staticmethod("gpu_blocks") + .def("gpu_threads", &Var::gpu_threads, // no args + "Vars to use for scheduling producer/consumer pairs on the gpu.") + .staticmethod("gpu_threads") + + .def("outermost", &Var::outermost, // no args + "A Var that represents the location outside the outermost loop.") + .staticmethod("outermost") + + .def("__repr__", &var_repr, p::arg("self")); ; add_operators(var_class); diff --git a/python_bindings/python/Var.h b/python_bindings/python/Var.h index a0f387048f0a..2692719d343c 100644 --- a/python_bindings/python/Var.h +++ b/python_bindings/python/Var.h @@ -3,4 +3,4 @@ void defineVar(); -#endif // VAR_H +#endif // VAR_H diff --git a/python_bindings/python/add_operators.cpp b/python_bindings/python/add_operators.cpp index 640d5240a70e..02de9b75f19d 100644 --- a/python_bindings/python/add_operators.cpp +++ b/python_bindings/python/add_operators.cpp @@ -1,4 +1,3 @@ #include "add_operators.h" // nothing to define here, only templates in the headers - diff --git a/python_bindings/python/add_operators.h b/python_bindings/python/add_operators.h index 457fb1549c45..8c7356dba357 100644 --- a/python_bindings/python/add_operators.h +++ b/python_bindings/python/add_operators.h @@ -5,11 +5,9 @@ #include //#include - -template +template //WrappedType floordiv(A /*a*/, B /*b*/) -auto floordiv(A a, B b) -> decltype(a / b) -{ +auto floordiv(A a, B b) -> decltype(a / b) { //throw std::invalid_argument("Halide floordiv not yet implemented, use '/' instead."); //return WrappedType(); @@ -18,76 +16,72 @@ auto floordiv(A a, B b) -> decltype(a / b) return a / b; } - - -template -void add_operators_with(PythonClass &class_instance) -{ +template +void add_operators_with(PythonClass &class_instance) { using namespace boost::python; typedef typename PythonClass::wrapped_type wrapped_t; // lists all operators class_instance - .def(self + other()) - .def(other() + self) + .def(self + other()) + .def(other() + self) - .def(self - other()) - .def(other() - self) + .def(self - other()) + .def(other() - self) - .def(self * other()) - .def(other() * self) + .def(self * other()) + .def(other() * self) - .def(self / other()) - .def(other() / self) + .def(self / other()) + .def(other() / self) - .def(self % other()) - .def(other() % self) + .def(self % other()) + .def(other() % self) - .def(pow(self, other())) - .def(pow(other(), self)) + .def(pow(self, other())) + .def(pow(other(), self)) - .def(self & other()) // and - .def(other() & self) + .def(self & other()) // and + .def(other() & self) - .def(self | other()) // or - .def(other() | self) + .def(self | other()) // or + .def(other() | self) - .def(self < other()) - .def(other() < self) + .def(self < other()) + .def(other() < self) - .def(self <= other()) - .def(other() <= self) + .def(self <= other()) + .def(other() <= self) - .def(self == other()) - .def(other() == self) + .def(self == other()) + .def(other() == self) - .def(self != other()) - .def(other() != self) + .def(self != 
other()) + .def(other() != self) - .def(self > other()) - .def(other() > self) + .def(self > other()) + .def(other() > self) - .def(self >= other()) - .def(other() >= self) + .def(self >= other()) + .def(other() >= self) - .def(self >> other()) - .def(other() >> self) + .def(self >> other()) + .def(other() >> self) - .def(self << other()) - .def(other() << self) + .def(self << other()) + .def(other() << self) - .def("__floordiv__", &floordiv) - .def("__floordiv__", &floordiv) + .def("__floordiv__", &floordiv) + .def("__floordiv__", &floordiv) - ; + ; return; } -template -void add_operators(PythonClass &class_instance) -{ +template +void add_operators(PythonClass &class_instance) { using namespace boost::python; typedef typename PythonClass::wrapped_type wrapped_t; @@ -101,17 +95,14 @@ void add_operators(PythonClass &class_instance) // Define unary operators // lists all operators class_instance - .def(-self) // neg - //.def(+self) // pos - .def(~self) // invert - //.def(abs(self)) - //.def(!!self) // nonzero - ; + .def(-self) // neg + //.def(+self) // pos + .def(~self) // invert + //.def(abs(self)) + //.def(!!self) // nonzero + ; return; } - - - -#endif // ADD_OPERATORS_H +#endif // ADD_OPERATORS_H