diff --git a/CMakeLists.txt b/CMakeLists.txt index bd07f2da..7c18799f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -137,14 +137,16 @@ set(CMAKE_SWIG_FLAGS "-nomoduleglobal") # Install prefix message(STATUS "Installation prefix (CMAKE_INSTALL_PREFIX): ${CMAKE_INSTALL_PREFIX}") +set(LOCALINSTALL "${CMAKE_BINARY_DIR}/out" CACHE STRING "Default localinstall dest") -set(ENV_SH ${CMAKE_CURRENT_BINARY_DIR}/out/env.sh) +set(ENV_SH ${LOCALINSTALL}/env.sh) add_custom_target(localinstall - COMMAND $(MAKE) install DESTDIR=${CMAKE_BINARY_DIR}/out - COMMAND echo "export LD_LIBRARY_PATH=\"${CMAKE_CURRENT_BINARY_DIR}/out${CMAKE_INSTALL_PREFIX}/lib:${CMAKE_CURRENT_BINARY_DIR}/out${CMAKE_INSTALL_PREFIX}/lib/haka/modules/protocol\"" > ${ENV_SH} - COMMAND echo "export HAKA_PATH=\"${CMAKE_CURRENT_BINARY_DIR}/out${CMAKE_INSTALL_PREFIX}\"" >> ${ENV_SH} - COMMAND echo "export PATH=\"${CMAKE_CURRENT_BINARY_DIR}/out${CMAKE_INSTALL_PREFIX}/bin\":\"${CMAKE_CURRENT_BINARY_DIR}/out${CMAKE_INSTALL_PREFIX}/sbin\":$PATH" >> ${ENV_SH} + COMMAND $(MAKE) install DESTDIR=${LOCALINSTALL} + COMMAND echo "export LD_LIBRARY_PATH=\"${LOCALINSTALL}${CMAKE_INSTALL_PREFIX}/lib:${LOCALINSTALL}${CMAKE_INSTALL_PREFIX}/lib/haka:${LOCALINSTALL}${CMAKE_INSTALL_PREFIX}/lib/haka/modules/protocol:${LOCALINSTALL}${CMAKE_INSTALL_PREFIX}/lib/haka/modules/packet:${LOCALINSTALL}${CMAKE_INSTALL_PREFIX}/lib/haka/modules/misc\"" > ${ENV_SH} + COMMAND echo "export HAKA_PATH=\"${LOCALINSTALL}${CMAKE_INSTALL_PREFIX}\"" >> ${ENV_SH} + COMMAND echo "export PATH=\"${LOCALINSTALL}${CMAKE_INSTALL_PREFIX}/bin\":\"${LOCALINSTALL}${CMAKE_INSTALL_PREFIX}/sbin\":$PATH" >> ${ENV_SH} + COMMAND echo "export LUA_PATH=\"\$(realpath \$HAKA_PATH/share/haka/lua/share/luajit* | head -n 1)/?.lua\"" >> ${ENV_SH} WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} COMMENT "Install locally..." 
VERBATIM VERBATIM diff --git a/autobuild/build.sh b/autobuild/build.sh index f4e8aa71..c22542bf 100755 --- a/autobuild/build.sh +++ b/autobuild/build.sh @@ -1,4 +1,7 @@ #! /bin/bash +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. source autobuild/vars.sh source autobuild/includes.sh diff --git a/autobuild/configure.sh b/autobuild/configure.sh index 276319f6..7ab845f6 100755 --- a/autobuild/configure.sh +++ b/autobuild/configure.sh @@ -1,4 +1,7 @@ #! /bin/bash +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. source autobuild/vars.sh source autobuild/includes.sh diff --git a/autobuild/coverage.sh b/autobuild/coverage.sh index 807ae5a0..a8466f7e 100755 --- a/autobuild/coverage.sh +++ b/autobuild/coverage.sh @@ -1,4 +1,7 @@ #! /bin/bash +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. source autobuild/vars.sh source autobuild/includes.sh diff --git a/autobuild/doc.sh b/autobuild/doc.sh index c8260508..1d7355d0 100755 --- a/autobuild/doc.sh +++ b/autobuild/doc.sh @@ -1,4 +1,7 @@ #! /bin/bash +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. source autobuild/vars.sh source autobuild/includes.sh diff --git a/autobuild/includes.sh b/autobuild/includes.sh index 2a19ed8b..ef4e0875 100755 --- a/autobuild/includes.sh +++ b/autobuild/includes.sh @@ -1,5 +1,7 @@ #! /bin/bash -# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. 
If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. _run() { echo $* diff --git a/autobuild/package.sh b/autobuild/package.sh index 2b2f25a8..56ca2355 100755 --- a/autobuild/package.sh +++ b/autobuild/package.sh @@ -1,4 +1,7 @@ #! /bin/bash +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. source autobuild/vars.sh source autobuild/includes.sh diff --git a/autobuild/staticanalysis.sh b/autobuild/staticanalysis.sh index 23e060de..f605d4dc 100755 --- a/autobuild/staticanalysis.sh +++ b/autobuild/staticanalysis.sh @@ -1,4 +1,7 @@ #! /bin/bash +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. source autobuild/vars.sh source autobuild/includes.sh diff --git a/autobuild/test.sh b/autobuild/test.sh index 17eef6bd..74197b76 100755 --- a/autobuild/test.sh +++ b/autobuild/test.sh @@ -1,4 +1,7 @@ #! /bin/bash +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. source autobuild/vars.sh source autobuild/includes.sh diff --git a/autobuild/vars.sh b/autobuild/vars.sh index 9f4c7153..28c7faff 100755 --- a/autobuild/vars.sh +++ b/autobuild/vars.sh @@ -1,4 +1,7 @@ #! /bin/bash +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
export PATH=/bin:/usr/bin:/usr/local/bin export ROOT=$(pwd) diff --git a/build/FindJansson.cmake b/build/FindJansson.cmake new file mode 100644 index 00000000..56e8b2f6 --- /dev/null +++ b/build/FindJansson.cmake @@ -0,0 +1,15 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +find_path(JANSSON_INCLUDE_DIR jansson.h) +find_library(JANSSON_LIBRARY NAMES jansson) + +if(JANSSON_INCLUDE_DIR AND JANSSON_LIBRARY) + set(JANSSON_FOUND) +endif() + +include(FindPackageHandleStandardArgs) + +find_package_handle_standard_args(Jansson + REQUIRED_VARS JANSSON_LIBRARY JANSSON_INCLUDE_DIR) diff --git a/build/FindLibCurl.cmake b/build/FindLibCurl.cmake new file mode 100644 index 00000000..a7dd2225 --- /dev/null +++ b/build/FindLibCurl.cmake @@ -0,0 +1,15 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +find_path(LIBCURL_INCLUDE_DIR curl/curl.h) +find_library(LIBCURL_LIBRARY NAMES curl) + +if(LIBCURL_INCLUDE_DIR AND LIBCURL_LIBRARY) + set(LIBCURL_FOUND) +endif() + +include(FindPackageHandleStandardArgs) + +find_package_handle_standard_args(LibCurl + REQUIRED_VARS LIBCURL_LIBRARY LIBCURL_INCLUDE_DIR) diff --git a/build/FindLibGeoIP.cmake b/build/FindLibGeoIP.cmake new file mode 100644 index 00000000..da205569 --- /dev/null +++ b/build/FindLibGeoIP.cmake @@ -0,0 +1,15 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +find_path(LIBGEOIP_INCLUDE_DIR GeoIP.h) +find_library(LIBGEOIP_LIBRARY NAMES GeoIP) + +if(LIBGEOIP_INCLUDE_DIR AND LIBGEOIP_LIBRARY) + set(LIBGEOIP_FOUND) +endif() + +include(FindPackageHandleStandardArgs) + +find_package_handle_standard_args(LibGeoIP + REQUIRED_VARS LIBGEOIP_LIBRARY LIBGEOIP_INCLUDE_DIR) diff --git a/build/FindLibUuid.cmake b/build/FindLibUuid.cmake new file mode 100644 index 00000000..c1374826 --- /dev/null +++ b/build/FindLibUuid.cmake @@ -0,0 +1,15 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +find_path(LIBUUID_INCLUDE_DIR uuid/uuid.h) +find_library(LIBUUID_LIBRARY NAMES uuid) + +if(LIBUUID_INCLUDE_DIR AND LIBUUID_LIBRARY) + set(LIBUUID_FOUND) +endif() + +include(FindPackageHandleStandardArgs) + +find_package_handle_standard_args(LibUuid + REQUIRED_VARS LIBUUID_LIBRARY LIBUUID_INCLUDE_DIR) diff --git a/build/TestBenchRun.cmake b/build/TestBenchRun.cmake new file mode 100644 index 00000000..368c1829 --- /dev/null +++ b/build/TestBenchRun.cmake @@ -0,0 +1,23 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +set(ENV{LANG} "C") +set(ENV{LC_ALL} "C") +set(ENV{BUILD_DIR} ${CTEST_MODULE_DIR}) +set(ENV{LUA_PATH} ${PROJECT_SOURCE_DIR}/src/lua/?.lua) +set(ENV{HAKA_PATH} ${HAKA_PATH}) +set(ENV{PATH} $ENV{PATH}:${HAKA_PATH}/sbin:${HAKA_PATH}/bin) +set(ENV{LD_LIBRARY_PATH} ${HAKA_PATH}/lib:${HAKA_PATH}/lib/haka/modules/protocol:${HAKA_PATH}/lib/haka/modules/packet) +set(ENV{TZ} Europe/Paris) +set(ENV{CONF} ${CONF}) + +set(CMAKE_MODULE_PATH ${CTEST_MODULE_DIR} ${CMAKE_MODULE_PATH}) + +message("Executing TZ=\"Europe/Paris\" LANG=\"C\" LC_ALL=\"C\" LUA_PATH=\"$ENV{LUA_PATH}\" HAKA_PATH=\"$ENV{HAKA_PATH}\" LD_LIBRARY_PATH=\"$ENV{LD_LIBRARY_PATH}\" CONF=\"$ENV{CONF}\" ${EXE} ${BENCH}") + +execute_process(COMMAND ${EXE} ${BENCH} RESULT_VARIABLE HAD_ERROR) + +if(HAD_ERROR) + message(FATAL_ERROR "Benchmark script failed") +endif(HAD_ERROR) diff --git a/doc/ref/hakamodule.rst b/doc/ref/hakamodule.rst new file mode 100644 index 00000000..41e13e3b --- /dev/null +++ b/doc/ref/hakamodule.rst @@ -0,0 +1,11 @@ +.. This Source Code Form is subject to the terms of the Mozilla Public +.. License, v. 2.0. If a copy of the MPL was not distributed with this +.. file, You can obtain one at http://mozilla.org/MPL/2.0/. + +Haka modules +============ + +.. toctree:: + + ../../../modules/misc/geoip/doc/geoip.rst + ../../../modules/misc/elasticsearch/doc/elasticsearch.rst diff --git a/doc/ref/refindex.rst b/doc/ref/refindex.rst index 4aff59e4..fd04334e 100644 --- a/doc/ref/refindex.rst +++ b/doc/ref/refindex.rst @@ -16,6 +16,7 @@ Welcome to Haka's User Reference Guide! 
alert.rst regexp.rst hakadissector.rst + hakamodule.rst rule.rst grammar.rst state_machine.rst diff --git a/doc/theme/haka/static/haka.css_t b/doc/theme/haka/static/haka.css_t index be7ba570..b65f88fa 100644 --- a/doc/theme/haka/static/haka.css_t +++ b/doc/theme/haka/static/haka.css_t @@ -220,11 +220,16 @@ li.toctree-l2 { padding-left: 20px; } +/* Admonition */ +div.admonition { + border-radius: 5px; +} + /* Exercise */ div.admonition-exercise { background: none repeat scroll 0 0 #f2f8ec; border: 1px solid #99CC66; - border-radius: 5px 5px 5px 5px; + border-radius: 5px; } div.admonition-exercise > p, diff --git a/doc/user/tool_suite_haka.rst b/doc/user/tool_suite_haka.rst index 62a3c1db..edb0542e 100644 --- a/doc/user/tool_suite_haka.rst +++ b/doc/user/tool_suite_haka.rst @@ -128,6 +128,7 @@ Example .. literalinclude:: ../../sample/gettingstarted/gettingstarted.conf :tab-width: 4 + :language: ini Service ------- diff --git a/doc/user/workshop.rst b/doc/user/workshop.rst index 7f4b148f..7609b31d 100644 --- a/doc/user/workshop.rst +++ b/doc/user/workshop.rst @@ -21,5 +21,6 @@ full environment containing Haka with all its dependencies. workshop/filter.rst workshop/modif.rst workshop/smtp.rst + workshop/hakabana.rst workshop/goingfurther.rst workshop/appendix.rst diff --git a/doc/user/workshop/hakabana.rst b/doc/user/workshop/hakabana.rst new file mode 100644 index 00000000..23c99b33 --- /dev/null +++ b/doc/user/workshop/hakabana.rst @@ -0,0 +1,71 @@ +.. This Source Code Form is subject to the terms of the Mozilla Public +.. License, v. 2.0. If a copy of the MPL was not distributed with this +.. file, You can obtain one at http://mozilla.org/MPL/2.0/. + +Hakabana +======== + +Hakabana is a monitoring tool that uses Kibana and Elasticsearch to visualize +traffic passing through Haka in *real-time*. The package is already installed +in the haka-live iso but can be downloaded directly from http://www.haka-security.org. 
+ +Getting started +^^^^^^^^^^^^^^^ + +Hakabana module is installed at ``/usr/share/haka/modules/misc/hakabana``. +It consists of a set of security rules that export network traffic to +Elasticsearch server. They are then displayed thanks to our Kibana dashboard. + +Hakabana ships with a default configuration allowing starting quickly with +traffic monitoring. It is available in ``/usr/share/haka/hakabana`` + +.. admonition:: Exercise + + * follow the instruction below to start haka: + + .. code-block:: console + + cd /usr/local/share/haka/hakabana + haka -c haka.conf + + * visit the url: http://localhost/kibana/ and load hakabana dashboard + from ``/usr/share/haka/hakabana/dashboard/`` + +I want more DNS info +^^^^^^^^^^^^^^^^^^^^ + +Your goal here is to customize the security rules in order to export extra data. + +.. admonition:: Exercise + + * update the ``dns.lua`` in order to export dns types. + + * add a panel to hakabana dashboard to display dns types. + + +Geo localization +^^^^^^^^^^^^^^^^ + +Hakabana features a `geoip` module allowing to get the country code associated to an ip +address. Here is an example using it: + +.. code-block:: lua + + local ipv4 = require('protocol/ipv4') + + local geoip_module = require('misc/geoip') + local geoip = geoip_module.open('/usr/share/GeoIP/GeoIP.dat') + + haka.rule { + hook = ipv4.events.receive_packet, + eval = function (pkt) + local dst = pkt.dst + haka.log("geoip", "ip %s from %s",dst, geoip:country(dst)) + end + } + +.. admonition:: Exercise + + * update the ``flow.lua`` file in order to exclude traffic addressed to a given + country. 
+ diff --git a/external/luajit/luajit.cmake b/external/luajit/luajit.cmake index 6b972e01..a7e89c79 100644 --- a/external/luajit/luajit.cmake +++ b/external/luajit/luajit.cmake @@ -58,11 +58,11 @@ add_custom_target(luajit ) set(LUA_DIR ${CMAKE_BINARY_DIR}/${LUAJIT_DIR}/${INSTALL_FULLDIR}) -set(LUA_INCLUDE_DIR ${CMAKE_BINARY_DIR}/${LUAJIT_DIR}/${INSTALL_FULLDIR}/include/luajit-2.0) +set(LUA_INCLUDE_DIR ${CMAKE_BINARY_DIR}/${LUAJIT_DIR}/src) set(LUA_LIBRARY_DIR ${CMAKE_BINARY_DIR}/${LUAJIT_DIR}/${INSTALL_FULLDIR}/lib/) -set(LUA_LIBRARIES ${CMAKE_BINARY_DIR}/${LUAJIT_DIR}/${INSTALL_FULLDIR}/lib/libluajit-5.1.a) +set(LUA_LIBRARIES ${CMAKE_BINARY_DIR}/${LUAJIT_DIR}/src/libluajit.a) -set(LUA_COMPILER ${CMAKE_SOURCE_DIR}/external/luajit/luajitc -p "${CMAKE_BINARY_DIR}/${LUAJIT_DIR}/${INSTALL_FULLDIR}/") +set(LUA_COMPILER ${CMAKE_SOURCE_DIR}/external/luajit/luajitc -p "${CMAKE_BINARY_DIR}/${LUAJIT_DIR}/src/") set(LUA_FLAGS_NONE "-g") set(LUA_FLAGS_DEBUG "-g") set(LUA_FLAGS_MEMCHECK "-g") @@ -70,9 +70,18 @@ set(LUA_FLAGS_RELEASE "-s") set(LUA_FLAGS_RELWITHDEBINFO "-g") set(LUA_FLAGS_MINSIZEREL "-s") -install(DIRECTORY ${CMAKE_BINARY_DIR}/${LUAJIT_DIR}/${INSTALL_FULLDIR}/share/lua DESTINATION share/haka/lua/share) -install(DIRECTORY ${CMAKE_BINARY_DIR}/${LUAJIT_DIR}/${INSTALL_FULLDIR}/share/luajit-2.0.3 DESTINATION share/haka/lua/share) -install(DIRECTORY ${LUA_INCLUDE_DIR}/ DESTINATION include/haka/lua) +install(DIRECTORY ${CMAKE_BINARY_DIR}/${LUAJIT_DIR}/${INSTALL_FULLDIR}/share/ + DESTINATION share/haka/lua/share + PATTERN man* EXCLUDE +) + +install(DIRECTORY ${CMAKE_BINARY_DIR}/${LUAJIT_DIR}/src/ + DESTINATION include/haka/lua + FILES_MATCHING PATTERN *.h + PATTERN lj_* EXCLUDE + PATTERN host* EXCLUDE + PATTERN jit EXCLUDE +) set(HAKA_LUAJIT 1) set(HAKA_LUA51 1) diff --git a/external/luajit/luajitc b/external/luajit/luajitc index ba85ed62..298eded8 100755 --- a/external/luajit/luajitc +++ b/external/luajit/luajitc @@ -18,7 +18,6 @@ done shift $((OPTIND - 1)) -export 
LD_LIBRARY_PATH="$LUAJIT_PATH/lib" -export LUA_PATH="$LUAJIT_PATH/share/luajit-2.0.3/?.lua" +export LUA_PATH="$LUAJIT_PATH/?.lua" -$LUAJIT_PATH/bin/luajit -b $OPTIONS $1 $OUTPUT +$LUAJIT_PATH/luajit -b $OPTIONS $1 $OUTPUT diff --git a/include/haka/alert.h b/include/haka/alert.h index 9d6eb855..144f493e 100644 --- a/include/haka/alert.h +++ b/include/haka/alert.h @@ -10,7 +10,6 @@ #ifndef _HAKA_ALERT_H #define _HAKA_ALERT_H -#include #include #include #include @@ -55,7 +54,7 @@ typedef enum { */ struct alert_node { alert_node_type type; /**< Alert node type. */ - wchar_t **list; /**< NULL terminated array of strings. */ + char **list; /**< NULL terminated array of strings. */ }; /** @@ -64,13 +63,13 @@ struct alert_node { struct alert { struct time start_time; /**< Alert time. */ struct time end_time; /**< Alert time. */ - wchar_t *description; /**< Alert description. */ + char *description; /**< Alert description. */ alert_level severity; /**< Alert severity (HAKA_ALERT_NUMERIC is not a valid value here). */ alert_level confidence; /**< Alert confidence. */ double confidence_num; /**< Alert confidence numeric value if confidence == HAKA_ALERT_NUMERIC. */ alert_completion completion; /**< Alert completion. */ - wchar_t *method_description; /**< Alert method description. */ - wchar_t **method_ref; /**< Alert method references (NULL terminated array). */ + char *method_description; /**< Alert method description. */ + char **method_ref; /**< Alert method references (NULL terminated array). */ struct alert_node **sources; /**< Alert sources (NULL terminated array of nodes). */ struct alert_node **targets; /**< Alert targets (NULL terminated array of nodes). */ size_t alert_ref_count; /**< Reference count. */ @@ -103,12 +102,12 @@ struct alert { * \param name ``sources`` or ``target``. * \param index Index of the node. * \param type Type of node (see :c:type:`alert_node_type`). - * \param ... List of strings (wchar_t *). + * \param ... List of strings. 
*/ #define ALERT_NODE(alert, name, index, type, ...) \ struct alert_node _node##name##index = { type }; \ alert.name[index] = &_node##name##index; \ - wchar_t *_node##name##index_list[] = { __VA_ARGS__, NULL }; \ + char *_node##name##index_list[] = { __VA_ARGS__, NULL }; \ _node##name##index.list = _node##name##index_list /** @@ -127,10 +126,10 @@ struct alert { * Add method references. * * \param alert Alert name. - * \param ... List of strings (wchar_t *). + * \param ... List of strings. */ #define ALERT_METHOD_REF(alert, ...) \ - wchar_t *_method_ref[] = { __VA_ARGS__, NULL }; \ + char *_method_ref[] = { __VA_ARGS__, NULL }; \ alert.method_ref = _method_ref /** @@ -145,10 +144,26 @@ uint64 alert(const struct alert *alert); */ bool alert_update(uint64 id, const struct alert *alert); +/** + * Convert alert level to human readable string. + */ +const char *alert_level_to_str(alert_level level); + +/** + * Convert alert completion to human readable string. + */ +const char *alert_completion_to_str(alert_completion completion); + +/** + * Convert alert node to human readable string. + */ +const char *alert_node_to_str(alert_node_type type); + /** * Convert an alert to a string. */ -const wchar_t *alert_tostring(uint64 id, const struct time *time, const struct alert *alert, const char *header, const char *indent, bool color); +const char *alert_tostring(uint64 id, const struct time *time, const struct alert *alert, + const char *header, const char *indent, bool color); /** * Enable display of alerts on stdout. 
diff --git a/include/haka/compiler.h b/include/haka/compiler.h index 3d4ffef2..1fbf6fd2 100644 --- a/include/haka/compiler.h +++ b/include/haka/compiler.h @@ -9,6 +9,8 @@ #define PACKED __attribute__((packed)) +#define FORMAT_PRINTF(fmt, args) __attribute__((format(printf, fmt, args))) + #define INIT __attribute__((constructor(32767))) #define INIT_P(p) __attribute__((constructor(p))) #define FINI __attribute__((destructor(32767))) diff --git a/include/haka/container/vector.h b/include/haka/container/vector.h index 2a256e77..96a40b61 100644 --- a/include/haka/container/vector.h +++ b/include/haka/container/vector.h @@ -26,8 +26,8 @@ struct vector { #define vector_get(v, type, index) ((type*)_vector_get((v), sizeof(type), (index))) #define vector_set(v, type, index, value) (*(type*)_vector_get((v), sizeof(type), (index)) = (value)) #define vector_push(v, type) ((type*)_vector_push((v), sizeof(type))) -#define vector_last(v, type) vector_get((v), (type), vector_count(v)-1) -#define vector_first(v, type) vector_get((v), (type), 0) +#define vector_last(v, type) ((type*)_vector_get((v), sizeof(type), vector_count(v)-1)) +#define vector_first(v, type) ((type*)_vector_get((v), sizeof(type), 0)) INLINE size_t vector_count(struct vector *v) { return v->count; } INLINE bool vector_isempty(struct vector *v) { return v->count == 0; } diff --git a/include/haka/error.h b/include/haka/error.h index 80819acd..2e76db36 100644 --- a/include/haka/error.h +++ b/include/haka/error.h @@ -10,8 +10,8 @@ #ifndef _HAKA_ERROR_H #define _HAKA_ERROR_H -#include #include +#include @@ -22,7 +22,7 @@ * * If lua is at the origin of this call, the error will be converted to a lua error. */ -void error(const wchar_t *error, ...); +void error(const char *error, ...) FORMAT_PRINTF(1, 2); /** * Convert the `err` value to a human readable error message. @@ -40,6 +40,6 @@ bool check_error(); /** * Get the error message and clear the error state.
*/ -const wchar_t *clear_error(); +const char *clear_error(); #endif /* _HAKA_ERROR_H */ diff --git a/include/haka/log.h b/include/haka/log.h index 1d157fb5..80ad2307 100644 --- a/include/haka/log.h +++ b/include/haka/log.h @@ -10,8 +10,8 @@ #ifndef _HAKA_LOG_H #define _HAKA_LOG_H -#include #include +#include #include @@ -46,23 +46,23 @@ log_level str_to_level(const char *str); /** * Log a message without string formating. */ -void message(log_level level, const wchar_t *module, const wchar_t *message); +void message(log_level level, const char *module, const char *message); /** * Log a message with string formating. */ -void messagef(log_level level, const wchar_t *module, const wchar_t *fmt, ...); +void messagef(log_level level, const char *module, const char *fmt, ...) FORMAT_PRINTF(3, 4); /** * Set the logging level to display for a given module name. The `module` parameter can be * `NULL` in which case it will set the default level. */ -void setlevel(log_level level, const wchar_t *module); +void setlevel(log_level level, const char *module); /** * Get the logging level for a given module name. */ -log_level getlevel(const wchar_t *module); +log_level getlevel(const char *module); /** * Change the display of log message on stdout. @@ -72,7 +72,7 @@ void enable_stdout_logging(bool enable); /** * Show a log line on the stdout. */ -bool stdout_message(log_level lvl, const wchar_t *module, const wchar_t *message); +bool stdout_message(log_level lvl, const char *module, const char *message); /** * Logger instance structure. 
@@ -80,7 +80,7 @@ bool stdout_message(log_level lvl, const wchar_t *module, const wchar_t *message struct logger { struct list list; void (*destroy)(struct logger *state); - int (*message)(struct logger *state, log_level level, const wchar_t *module, const wchar_t *message); + int (*message)(struct logger *state, log_level level, const char *module, const char *message); bool mark_for_remove; /**< \private */ }; diff --git a/include/haka/lua/array.si b/include/haka/lua/array.si index cf2155f6..f86f0104 100644 --- a/include/haka/lua/array.si +++ b/include/haka/lua/array.si @@ -12,7 +12,7 @@ static struct type **_get_##type##_array(lua_State* L, int input, swig_type_info int i, size = lua_rawlen(L, input); array = malloc(sizeof(struct type*)*(size+1)); if (!array) { - error(L"memory error"); + error("memory error"); return NULL; } for (i = 0, iter = array; i < size; ++iter, ++i) { diff --git a/include/haka/lua/ref.h b/include/haka/lua/ref.h index 4baefa15..625c0849 100644 --- a/include/haka/lua/ref.h +++ b/include/haka/lua/ref.h @@ -23,7 +23,7 @@ struct lua_ref { void lua_ref_init(struct lua_ref *ref); bool lua_ref_isvalid(struct lua_ref *ref); -void lua_ref_get(struct lua_State *state, struct lua_ref *ref); +void lua_ref_get(struct lua_State *state, struct lua_ref *ref, int index); bool lua_ref_clear(struct lua_ref *ref); void lua_ref_push(struct lua_State *state, struct lua_ref *ref); diff --git a/include/haka/lua/ref.si b/include/haka/lua/ref.si index 3145d046..8dac2c47 100644 --- a/include/haka/lua/ref.si +++ b/include/haka/lua/ref.si @@ -8,9 +8,8 @@ %typemap(in) struct lua_ref %{ - lua_pushvalue(L, $input); lua_ref_init(&$1); - lua_ref_get(L, &$1); + lua_ref_get(L, &$1, $input); %} %typemap(out) struct lua_ref diff --git a/include/haka/lua/state.h b/include/haka/lua/state.h index 7ded9640..2aac8141 100644 --- a/include/haka/lua/state.h +++ b/include/haka/lua/state.h @@ -29,7 +29,7 @@ bool lua_state_run_file(struct lua_state *L, const char *filename, int argc, 
cha void lua_state_trigger_haka_event(struct lua_state *state, const char *event); int lua_state_error_formater(struct lua_State *L); -void lua_state_print_error(struct lua_State *L, const wchar_t *msg); +void lua_state_print_error(struct lua_State *L, const char *msg); struct lua_state *lua_state_get(struct lua_State *L); extern void (*lua_state_error_hook)(struct lua_State *L); diff --git a/include/haka/lua/swig.si b/include/haka/lua/swig.si index 92280f85..513a27fa 100644 --- a/include/haka/lua/swig.si +++ b/include/haka/lua/swig.si @@ -18,6 +18,8 @@ %{ #include + #include + #include #include #include @@ -61,8 +63,51 @@ } #define SWIG_Lua_class_register SWIG_Lua_class_register_custom + + /* Fix the thread safety of module loading. + * + * The goal is to wrap the function created by SWIG named SWIG_init in + * a lock. To do this we use a trick to redefine the macro SWIG_init to + * force our function to be called instead of the default one. + * + * SWIG_init and SWIG_init_user is a marco defined by SWIG which uses the + * module name. The real function SWIG_init is defined at the end of the + * generated file. 
+ */ + #define SWIG_custom_init CONCAT(SWIG_init_user, _init) + + static mutex_t luaopen_lock = MUTEX_INIT; + + #ifdef __cplusplus + extern "C" { + #endif + + #if ((SWIG_LUA_TARGET == SWIG_LUA_FLAVOR_ELUA) || (SWIG_LUA_TARGET == SWIG_LUA_FLAVOR_ELUAC)) + LUALIB_API int SWIG_custom_init(lua_State* L); + #else + SWIGEXPORT int SWIG_custom_init(lua_State* L); + #endif + + #if ((SWIG_LUA_TARGET == SWIG_LUA_FLAVOR_ELUA) || (SWIG_LUA_TARGET == SWIG_LUA_FLAVOR_ELUAC)) + LUALIB_API int SWIG_init(lua_State* L) + #else + SWIGEXPORT int SWIG_init(lua_State* L) /* default Lua action */ + #endif + { + #undef SWIG_init + #define SWIG_init SWIG_custom_init + + mutex_lock(&luaopen_lock); + SWIG_init(L); + mutex_unlock(&luaopen_lock); + } + + #ifdef __cplusplus + } + #endif %} + %insert("init") %{ if (luaL_loadbuffer(L, SWIG_LUACODE, strlen(SWIG_LUACODE), SWIG_name)) { luaL_error(L, lua_tostring(L, -1)); @@ -82,14 +127,10 @@ %} %exception %{ - const wchar_t *error; + const char *error; $action if ((error = clear_error())) { - if (!lua_pushwstring(L, error)) { - if (!lua_pushwstring(L, clear_error())) { - lua_pushstring(L, "unknown error"); - } - } + lua_pushstring(L, error); SWIG_execfail; } %} @@ -120,11 +161,11 @@ %{ void type ## ___getitem(struct type *self, char *KEY) { - error(L"unknown field '%s'", KEY); + error("unknown field '%s'", KEY); } void type ## ___setitem(struct type *self, char *KEY, void *ANY) { - error(L"unknown field '%s'", KEY); + error("unknown field '%s'", KEY); } %} diff --git a/include/haka/macro.h b/include/haka/macro.h index 69cc16d8..a42dfa22 100644 --- a/include/haka/macro.h +++ b/include/haka/macro.h @@ -6,7 +6,9 @@ #define _HAKA_MACRO_H #define _STR(v) #v - #define STR(v) _STR(v) +#define _CONCAT(a, b) a ## b +#define CONCAT(a, b) _CONCAT(a, b) + #endif /* _HAKA_MACRO_H */ diff --git a/include/haka/module.h b/include/haka/module.h index 8d29839d..9f55f995 100644 --- a/include/haka/module.h +++ b/include/haka/module.h @@ -37,9 +37,9 @@ struct module { 
enum module_type type; /**< Module type */ - const wchar_t *name; /**< Module name. */ - const wchar_t *description; /**< Module description. */ - const wchar_t *author; /**< Module author. */ + const char *name; /**< Module name. */ + const char *description; /**< Module description. */ + const char *author; /**< Module author. */ int api_version; /**< API version (use HAKA_API_VERSION). */ /** diff --git a/include/haka/packet.h b/include/haka/packet.h index a574a100..11f1d1b4 100644 --- a/include/haka/packet.h +++ b/include/haka/packet.h @@ -17,6 +17,7 @@ #include #include #include +#include /** Opaque packet structure. */ @@ -24,6 +25,7 @@ struct packet { struct lua_object lua_object; /**< \private */ atomic_t ref; /**< \private */ struct vbuffer payload; /**< \private */ + struct lua_ref userdata; }; /** \cond */ diff --git a/include/haka/time.h b/include/haka/time.h index acd62d06..823ea9bc 100644 --- a/include/haka/time.h +++ b/include/haka/time.h @@ -71,11 +71,17 @@ int time_cmp(const struct time *t1, const struct time *t2); double time_sec(const struct time *t); /** - * Convert time to a string + * Convert time to a string. * \see TIME_BUFSIZE */ bool time_tostring(const struct time *t, char *buffer, size_t len); +/** + * Convert time to a formated string. + * \see strftime() + */ +bool time_format(const struct time *t, const char *format, char *buffer, size_t len); + /** * Check if the time is valid. 
*/ diff --git a/lib/haka/CMakeLists.txt b/lib/haka/CMakeLists.txt index a76e1f0c..460bbc83 100644 --- a/lib/haka/CMakeLists.txt +++ b/lib/haka/CMakeLists.txt @@ -80,12 +80,12 @@ set_target_properties(libhaka PROPERTIES VERSION ${HAKA_VERSION_MAJOR}.${HAKA_VE find_package(DL REQUIRED) find_package(Editline REQUIRED) -target_link_libraries(libhaka ${DL_LIBRARIES}) -target_link_libraries(libhaka ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(libhaka LINK_PRIVATE ${DL_LIBRARIES}) +target_link_libraries(libhaka LINK_PRIVATE ${CMAKE_THREAD_LIBS_INIT}) target_link_libraries(libhaka LINK_PRIVATE libiniparser) -target_link_libraries(libhaka rt) -target_link_libraries(libhaka m) -target_link_libraries(libhaka ${EDITLINE_LIBRARY}) +target_link_libraries(libhaka LINK_PRIVATE rt) +target_link_libraries(libhaka LINK_PRIVATE m) +target_link_libraries(libhaka LINK_PRIVATE ${EDITLINE_LIBRARY}) target_link_libraries(libhaka LINK_PRIVATE ${LUA_LIBRARIES}) add_dependencies(libhaka ${LUA_DEPENDENCY}) diff --git a/lib/haka/alert.c b/lib/haka/alert.c index d9423a4a..e425ab23 100644 --- a/lib/haka/alert.c +++ b/lib/haka/alert.c @@ -22,7 +22,7 @@ static local_storage_t alert_string_key; static atomic64_t alert_id; static rwlock_t alert_module_lock = RWLOCK_INIT; -#define BUFFER_SIZE 2048 +#define BUFFER_SIZE 3072 static void alert_string_delete(void *value) { @@ -45,7 +45,7 @@ FINI static void _alert_fini() atomic64_destroy(&alert_id); { - wchar_t *buffer = local_storage_get(&alert_string_key); + char *buffer = local_storage_get(&alert_string_key); if (buffer) { alert_string_delete(buffer); } @@ -93,7 +93,7 @@ bool remove_alerter(struct alerter *alerter) } if (!iter) { - error(L"Alert module is not registered"); + error("Alert module is not registered"); return false; } @@ -180,11 +180,11 @@ bool alert_update(uint64 id, const struct alert *alert) return id; } -static wchar_t *alert_string_context() +static char *alert_string_context() { - wchar_t *context = (wchar_t 
*)local_storage_get(&alert_string_key); + char *context = (char *)local_storage_get(&alert_string_key); if (!context) { - context = malloc(sizeof(wchar_t)*BUFFER_SIZE); + context = malloc(sizeof(char)*BUFFER_SIZE); assert(context); local_storage_set(&alert_string_key, context); @@ -200,7 +200,7 @@ static const char *str_alert_level[HAKA_ALERT_LEVEL_LAST] = { "", }; -static const char *alert_level_to_str(alert_level level) +const char *alert_level_to_str(alert_level level) { assert(level >= 0 && level < HAKA_ALERT_LEVEL_LAST); return str_alert_level[level]; @@ -212,7 +212,7 @@ static const char *str_alert_completion[HAKA_ALERT_COMPLETION_LAST] = { "successful", }; -static const char *alert_completion_to_str(alert_completion completion) +const char *alert_completion_to_str(alert_completion completion) { assert(completion >= 0 && completion < HAKA_ALERT_COMPLETION_LAST); return str_alert_completion[completion]; @@ -223,57 +223,57 @@ static const char *str_alert_node_type[HAKA_ALERT_NODE_LAST] = { "service", }; -static const char *alert_node_to_str(alert_node_type type) +const char *alert_node_to_str(alert_node_type type) { assert(type >= 0 && type < HAKA_ALERT_NODE_LAST); return str_alert_node_type[type]; } -static void alert_string_append(wchar_t **buffer, size_t *len, wchar_t *format, ...) +FORMAT_PRINTF(3, 4) static void alert_string_append(char **buffer, size_t *len, char *format, ...) 
{ int count; va_list ap; va_start(ap, format); - count = vswprintf(*buffer, *len, format, ap); + count = vsnprintf(*buffer, *len, format, ap); *buffer += count; *len -= count; va_end(ap); } -static void alert_stringlist_append(wchar_t **buffer, size_t *len, wchar_t **array) +static void alert_stringlist_append(char **buffer, size_t *len, char **array) { if (array) { - wchar_t **iter; + char **iter; for (iter = array; *iter; ++iter) { if (iter != array) - alert_string_append(buffer, len, L","); - alert_string_append(buffer, len, L" %ls", *iter); + alert_string_append(buffer, len, ","); + alert_string_append(buffer, len, " %s", *iter); } } } -static void alert_array_append(wchar_t **buffer, size_t *len, wchar_t **array) +static void alert_array_append(char **buffer, size_t *len, char **array) { - alert_string_append(buffer, len, L"{"); + alert_string_append(buffer, len, "{"); alert_stringlist_append(buffer, len, array); - alert_string_append(buffer, len, L" }"); + alert_string_append(buffer, len, " }"); } -static void alert_nodes_append(wchar_t **buffer, size_t *len, struct alert_node **array, const char *indent, char *color, char *clear) +static void alert_nodes_append(char **buffer, size_t *len, struct alert_node **array, const char *indent, char *color, char *clear) { struct alert_node **iter; - alert_string_append(buffer, len, L"{"); + alert_string_append(buffer, len, "{"); for (iter = array; *iter; ++iter) { - alert_string_append(buffer, len, L"%s\t%s%s%s:", indent, color, alert_node_to_str((*iter)->type), clear); + alert_string_append(buffer, len, "%s\t%s%s%s:", indent, color, alert_node_to_str((*iter)->type), clear); alert_stringlist_append(buffer, len, (*iter)->list); } - alert_string_append(buffer, len, L"%s}", indent); + alert_string_append(buffer, len, "%s}", indent); } -const wchar_t *alert_tostring(uint64 id, const struct time *time, const struct alert *alert, const char *header, const char *indent, bool colored) +const char *alert_tostring(uint64 id, 
const struct time *time, const struct alert *alert, const char *header, const char *indent, bool colored) { - wchar_t *buffer = alert_string_context(); - wchar_t *iter = buffer; + char *buffer = alert_string_context(); + char *iter = buffer; size_t len = BUFFER_SIZE; char *color = "", *clear = ""; @@ -282,85 +282,85 @@ const wchar_t *alert_tostring(uint64 id, const struct time *time, const struct a clear = CLEAR; } - alert_string_append(&iter, &len, L"%s%sid%s = %llu", header, color, clear, id); + alert_string_append(&iter, &len, "%s%sid%s = %llu", header, color, clear, id); { char timestr[TIME_BUFSIZE]; time_tostring(time, timestr, TIME_BUFSIZE); - alert_string_append(&iter, &len, L"%s%stime%s = %s", indent, color, clear, timestr); + alert_string_append(&iter, &len, "%s%stime%s = %s", indent, color, clear, timestr); } if (time_isvalid(&alert->start_time)) { char timestr[TIME_BUFSIZE]; time_tostring(&alert->start_time, timestr, TIME_BUFSIZE); - alert_string_append(&iter, &len, L"%s%sstart time%s = %s", indent, color, clear, timestr); + alert_string_append(&iter, &len, "%s%sstart time%s = %s", indent, color, clear, timestr); } if (time_isvalid(&alert->end_time)) { char timestr[TIME_BUFSIZE]; time_tostring(&alert->end_time, timestr, TIME_BUFSIZE); - alert_string_append(&iter, &len, L"%s%send time%s = %s", indent, color, clear, timestr); + alert_string_append(&iter, &len, "%s%send time%s = %s", indent, color, clear, timestr); } if (alert->severity > HAKA_ALERT_LEVEL_NONE && alert->severity < HAKA_ALERT_NUMERIC) { - alert_string_append(&iter, &len, L"%s%sseverity%s = %s", indent, color, clear, + alert_string_append(&iter, &len, "%s%sseverity%s = %s", indent, color, clear, alert_level_to_str(alert->severity)); } if (alert->confidence > HAKA_ALERT_LEVEL_NONE) { if (alert->confidence == HAKA_ALERT_NUMERIC) { - alert_string_append(&iter, &len, L"%s%sconfidence%s = %g", indent, color, clear, + alert_string_append(&iter, &len, "%s%sconfidence%s = %g", indent, color, clear, 
alert->confidence_num); } else { - alert_string_append(&iter, &len, L"%s%sconfidence%s = %s", indent, color, clear, + alert_string_append(&iter, &len, "%s%sconfidence%s = %s", indent, color, clear, alert_level_to_str(alert->confidence)); } } if (alert->completion > HAKA_ALERT_COMPLETION_NONE) { - alert_string_append(&iter, &len, L"%s%scompletion%s = %s", indent, color, clear, + alert_string_append(&iter, &len, "%s%scompletion%s = %s", indent, color, clear, alert_completion_to_str(alert->completion)); } if (alert->description) - alert_string_append(&iter, &len, L"%s%sdescription%s = %ls", indent, color, clear, alert->description); + alert_string_append(&iter, &len, "%s%sdescription%s = %s", indent, color, clear, alert->description); if (alert->method_description || alert->method_ref) { - alert_string_append(&iter, &len, L"%s%smethod%s = {", indent, color, clear); + alert_string_append(&iter, &len, "%s%smethod%s = {", indent, color, clear); if (alert->method_description) { - alert_string_append(&iter, &len, L"%s%s\tdescription%s = %ls", indent, color, clear, + alert_string_append(&iter, &len, "%s%s\tdescription%s = %s", indent, color, clear, alert->method_description); } if (alert->method_ref) { - alert_string_append(&iter, &len, L"%s%s\tref%s = ", indent, color, clear); + alert_string_append(&iter, &len, "%s%s\tref%s = ", indent, color, clear); alert_array_append(&iter, &len, alert->method_ref); } - alert_string_append(&iter, &len, L"%s}", indent); + alert_string_append(&iter, &len, "%s}", indent); } if (alert->sources) { - alert_string_append(&iter, &len, L"%s%ssources%s = ", indent, color, clear); + alert_string_append(&iter, &len, "%s%ssources%s = ", indent, color, clear); alert_nodes_append(&iter, &len, alert->sources, indent, color, clear); } if (alert->targets) { - alert_string_append(&iter, &len, L"%s%stargets%s = ", indent, color, clear); + alert_string_append(&iter, &len, "%s%stargets%s = ", indent, color, clear); alert_nodes_append(&iter, &len, 
alert->targets, indent, color, clear); } if (alert->alert_ref_count && alert->alert_ref) { int i; - alert_string_append(&iter, &len, L"%s%srefs%s = {", indent, color, clear); + alert_string_append(&iter, &len, "%s%srefs%s = {", indent, color, clear); for (i=0; ialert_ref_count; ++i) { if (i != 0) - alert_string_append(&iter, &len, L","); - alert_string_append(&iter, &len, L" %llu", alert->alert_ref[i]); + alert_string_append(&iter, &len, ","); + alert_string_append(&iter, &len, " %llu", alert->alert_ref[i]); } - alert_string_append(&iter, &len, L" }"); + alert_string_append(&iter, &len, " }"); } diff --git a/lib/haka/alert_module.c b/lib/haka/alert_module.c index dfb4f741..a71adff3 100644 --- a/lib/haka/alert_module.c +++ b/lib/haka/alert_module.c @@ -23,7 +23,7 @@ struct alerter *alert_module_alerter(struct module *module, struct parameters *a struct alert_module *alert_module; if (module->type != MODULE_ALERT) { - error(L"invalid module type: not an alert module"); + error("invalid module type: not an alert module"); return NULL; } diff --git a/lib/haka/container/vector.c b/lib/haka/container/vector.c index c80a5afb..969c069e 100644 --- a/lib/haka/container/vector.c +++ b/lib/haka/container/vector.c @@ -67,7 +67,7 @@ bool vector_reserve(struct vector *v, size_t count) if (!data && count > 0) { free(v->data); v->data = NULL; - error(L"memory error"); + error("memory error"); return false; } v->allocated_count = count; @@ -101,7 +101,7 @@ bool _vector_create(struct vector *v, size_t elemsize, size_t reservecount, void if (reservecount > 0) { v->data = malloc(v->element_size*reservecount); if (!v->data) { - error(L"memory error"); + error("memory error"); return false; } } diff --git a/lib/haka/engine.c b/lib/haka/engine.c index 17fe726c..a6f141c7 100644 --- a/lib/haka/engine.c +++ b/lib/haka/engine.c @@ -24,7 +24,7 @@ struct remote_launch { void (*callback)(void *); void *data; int state; - const wchar_t *error; + const char *error; bool own_error; semaphore_t 
sync; }; @@ -62,7 +62,7 @@ bool engine_prepare(int thread_count) engine_threads = malloc(sizeof(struct engine_thread *)*thread_count); if (!engine_threads) { - error(L"memory error"); + error("memory error"); return false; } @@ -76,7 +76,7 @@ struct engine_thread *engine_thread_init(struct lua_State *state, int id) struct engine_thread *new = malloc(sizeof(struct engine_thread)); if (!new) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -92,7 +92,7 @@ struct engine_thread *engine_thread_init(struct lua_State *state, int id) err = pipe2(new->interrupt_fd, O_NONBLOCK); if (err) { - error(L"%s", errno_error(errno)); + error("%s", errno_error(errno)); free(new); return NULL; } @@ -132,7 +132,7 @@ void engine_thread_cleanup(struct engine_thread *thread) struct remote_launch *current = list2_get(iter, struct remote_launch, list); current->state = -1; - current->error = L"aborted"; + current->error = "aborted"; current->own_error = false; iter = list2_erase(iter); @@ -229,7 +229,7 @@ static void _lua_remote_launcher(void *_data) if (lua_unmarshal(thread->lua_state, data->code, data->size)) { if (lua_pcall(thread->lua_state, 0, 1, h)) { - error(L"%s", lua_tostring(thread->lua_state, -1)); + error("%s", lua_tostring(thread->lua_state, -1)); } else { data->res = lua_marshal(thread->lua_state, h+1, &data->res_size); @@ -282,14 +282,14 @@ char* engine_thread_raw_lua_remote_launch(struct engine_thread *thread, const ch data.res = NULL; data.res_size = 0; - messagef(HAKA_LOG_DEBUG, L"engine", L"lua remote launch on thread %d: %llu bytes", + messagef(HAKA_LOG_DEBUG, "engine", "lua remote launch on thread %d: %zu bytes", engine_thread_id(thread), data.size); if (!engine_thread_remote_launch(thread, _lua_remote_launcher, &data)) { return NULL; } - messagef(HAKA_LOG_DEBUG, L"engine", L"lua remote launch result on thread %d: %llu bytes", + messagef(HAKA_LOG_DEBUG, "engine", "lua remote launch result on thread %d: %zu bytes", engine_thread_id(thread), 
data.res_size); if (data.res) { @@ -312,16 +312,16 @@ static void _engine_thread_check_remote_launch(void *_thread) for (iter = list2_begin(&thread->remote_launches); iter != end; ) { struct remote_launch *current = list2_get(iter, struct remote_launch, list); - messagef(HAKA_LOG_DEBUG, L"engine", L"execute lua remote launch on thread %d", + messagef(HAKA_LOG_DEBUG, "engine", "execute lua remote launch on thread %d", engine_thread_id(thread)); current->callback(current->data); if (check_error()) { - current->error = wcsdup(clear_error()); + current->error = strdup(clear_error()); current->own_error = true; current->state = -1; - messagef(HAKA_LOG_DEBUG, L"engine", L"remote launch error on thread %d: %ls", + messagef(HAKA_LOG_DEBUG, "engine", "remote launch error on thread %d: %s", engine_thread_id(thread), current->error); } else { @@ -358,10 +358,10 @@ void engine_thread_interrupt_begin(struct engine_thread *thread) const int err = write(thread->interrupt_fd[1], &interrupt_magic, 1); if (err != 1) { if (err == -1) { - messagef(HAKA_LOG_ERROR, L"engine", L"engine interrupt error: %s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, "engine", "engine interrupt error: %s", errno_error(errno)); } else { - message(HAKA_LOG_ERROR, L"engine", L"engine interrupt error"); + message(HAKA_LOG_ERROR, "engine", "engine interrupt error"); } } } @@ -374,10 +374,10 @@ void engine_thread_interrupt_end(struct engine_thread *thread) const int err = read(thread->interrupt_fd[0], &byte, 1); if (err != 1 || byte != interrupt_magic) { if (err == -1) { - messagef(HAKA_LOG_ERROR, L"engine", L"engine interrupt error: %s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, "engine", "engine interrupt error: %s", errno_error(errno)); } else { - message(HAKA_LOG_ERROR, L"engine", L"engine interrupt error"); + message(HAKA_LOG_ERROR, "engine", "engine interrupt error"); } } } diff --git a/lib/haka/error.c b/lib/haka/error.c index b758e75a..dfdb4ac8 100644 --- a/lib/haka/error.c +++ 
b/lib/haka/error.c @@ -13,11 +13,11 @@ #include -#define HAKA_ERROR_SIZE 2048 +#define HAKA_ERROR_SIZE 3072 struct local_error { bool is_error; - wchar_t error_message[HAKA_ERROR_SIZE]; + char error_message[HAKA_ERROR_SIZE]; char errno_message[HAKA_ERROR_SIZE]; }; @@ -69,15 +69,14 @@ static struct local_error *error_context() } -void error(const wchar_t *error, ...) +void error(const char *error, ...) { if (error_is_valid) { struct local_error *context = error_context(); if (!context->is_error) { va_list ap; va_start(ap, error); - vswprintf(context->error_message, HAKA_ERROR_SIZE, error, ap); - context->error_message[HAKA_ERROR_SIZE-1] = 0; + vsnprintf(context->error_message, HAKA_ERROR_SIZE, error, ap); va_end(ap); context->is_error = true; @@ -116,7 +115,7 @@ bool check_error() } } -const wchar_t *clear_error() +const char *clear_error() { if (error_is_valid) { struct local_error *context = error_context(); diff --git a/lib/haka/log.c b/lib/haka/log.c index 86fefb7e..32e9afd5 100644 --- a/lib/haka/log.c +++ b/lib/haka/log.c @@ -44,7 +44,7 @@ static const char *message_color[HAKA_LOG_LEVEL_LAST] = { CLEAR, // LOG_DEBUG }; -#define MESSAGE_BUFSIZE 2048 +#define MESSAGE_BUFSIZE 3072 static void message_delete(void *value) { @@ -71,7 +71,7 @@ FINI static void _message_fini() remove_all_logger(); { - wchar_t *buffer = local_storage_get(&local_message_key); + void *buffer = local_storage_get(&local_message_key); if (buffer) { message_delete(buffer); } @@ -92,7 +92,7 @@ FINI static void _message_fini() struct message_context_t { bool doing_message; - wchar_t buffer[MESSAGE_BUFSIZE]; + char buffer[MESSAGE_BUFSIZE]; }; static struct message_context_t *message_context() @@ -150,7 +150,7 @@ bool remove_logger(struct logger *logger) } if (!iter) { - error(L"Log module is not registered"); + error("Log module is not registered"); return false; } @@ -204,7 +204,7 @@ log_level str_to_level(const char *str) } if (level == HAKA_LOG_LEVEL_LAST) { - error(L"invalid logging level: 
%s", str); + error("invalid logging level: %s", str); } return level; @@ -215,11 +215,11 @@ void enable_stdout_logging(bool enable) stdout_enable = enable; } -bool stdout_message(log_level lvl, const wchar_t *module, const wchar_t *message) +bool stdout_message(log_level lvl, const char *module, const char *message) { const char *level_str = level_to_str(lvl); const int level_size = strlen(level_str); - const int module_size = wcslen(module); + const int module_size = strlen(module); FILE *fd = (lvl == HAKA_LOG_FATAL) ? stderr : stdout; thread_setcancelstate(false); @@ -231,12 +231,12 @@ bool stdout_message(log_level lvl, const wchar_t *module, const wchar_t *message } if (stdout_use_colors) { - fprintf(fd, "%s%s" CLEAR "%*s " MODULE_COLOR "%ls:" CLEAR "%*s %s%ls\n" CLEAR, level_color[lvl], level_str, + fprintf(fd, "%s%s" CLEAR "%*s " MODULE_COLOR "%s:" CLEAR "%*s %s%s\n" CLEAR, level_color[lvl], level_str, level_size-5, "", module, stdout_module_size-module_size, "", message_color[lvl], message); } else { - fprintf(fd, "%s%*s %ls:%*s %ls\n", level_str, level_size-5, "", + fprintf(fd, "%s%*s %s:%*s %s\n", level_str, level_size-5, "", module, stdout_module_size-module_size, "", message); } @@ -247,7 +247,7 @@ bool stdout_message(log_level lvl, const wchar_t *module, const wchar_t *message return true; } -void message(log_level level, const wchar_t *module, const wchar_t *message) +void message(log_level level, const char *module, const char *message) { struct message_context_t *context = message_context(); if (context && !context->doing_message) { @@ -288,7 +288,7 @@ void message(log_level level, const wchar_t *module, const wchar_t *message) } } -void messagef(log_level level, const wchar_t *module, const wchar_t *fmt, ...) +void messagef(log_level level, const char *module, const char *fmt, ...) 
{ const log_level max_level = getlevel(module); if (level <= max_level) { @@ -296,8 +296,7 @@ void messagef(log_level level, const wchar_t *module, const wchar_t *fmt, ...) if (context && !context->doing_message) { va_list ap; va_start(ap, fmt); - vswprintf(context->buffer, MESSAGE_BUFSIZE, fmt, ap); - context->buffer[MESSAGE_BUFSIZE-1] = 0; + vsnprintf(context->buffer, MESSAGE_BUFSIZE, fmt, ap); message(level, module, context->buffer); va_end(ap); } @@ -305,7 +304,7 @@ void messagef(log_level level, const wchar_t *module, const wchar_t *fmt, ...) } struct module_level { - wchar_t *module; + char *module; log_level level; struct module_level *next; }; @@ -314,11 +313,11 @@ static struct module_level *module_level = NULL; static log_level default_level = HAKA_LOG_INFO; static rwlock_t log_level_lock = RWLOCK_INIT; -static struct module_level *get_module_level(const wchar_t *module, bool create) +static struct module_level *get_module_level(const char *module, bool create) { struct module_level *iter = module_level, *prev = NULL; while (iter) { - if (wcscmp(module, iter->module) == 0) { + if (strcmp(module, iter->module) == 0) { break; } @@ -329,14 +328,14 @@ static struct module_level *get_module_level(const wchar_t *module, bool create) if (!iter && create) { iter = malloc(sizeof(struct module_level)); if (!iter) { - error(L"memory error"); + error("memory error"); return NULL; } - iter->module = wcsdup(module); + iter->module = strdup(module); if (!iter->module) { free(iter); - error(L"memory error"); + error("memory error"); return NULL; } @@ -349,11 +348,11 @@ static struct module_level *get_module_level(const wchar_t *module, bool create) return iter; } -static void reset_module_level(const wchar_t *module) +static void reset_module_level(const char *module) { struct module_level *iter = module_level, *prev = NULL; while (iter) { - if (wcscmp(module, iter->module) == 0) { + if (strcmp(module, iter->module) == 0) { if (prev) { prev->next = iter->next; } else { 
@@ -367,13 +366,13 @@ static void reset_module_level(const wchar_t *module) } } -void setlevel(log_level level, const wchar_t *module) +void setlevel(log_level level, const char *module) { rwlock_writelock(&log_level_lock); if (!module) { if (level == HAKA_LOG_DEFAULT) { - message(HAKA_LOG_WARNING, L"core", L"cannot set log level default for global level"); + message(HAKA_LOG_WARNING, "core", "cannot set log level default for global level"); } else { default_level = level; } @@ -392,7 +391,7 @@ void setlevel(log_level level, const wchar_t *module) rwlock_unlock(&log_level_lock); } -log_level getlevel(const wchar_t *module) +log_level getlevel(const char *module) { log_level level; diff --git a/lib/haka/log_module.c b/lib/haka/log_module.c index 3cb081b3..08ebef3a 100644 --- a/lib/haka/log_module.c +++ b/lib/haka/log_module.c @@ -23,7 +23,7 @@ struct logger *log_module_logger(struct module *module, struct parameters *args) struct log_module *log_module; if (module->type != MODULE_LOG) { - error(L"invalid module type: not a log module"); + error("invalid module type: not a log module"); return NULL; } diff --git a/lib/haka/lua/alert.si b/lib/haka/lua/alert.si index 8daca44f..1769f3eb 100644 --- a/lib/haka/lua/alert.si +++ b/lib/haka/lua/alert.si @@ -37,7 +37,6 @@ static void free_nodes(struct alert_node **nodes) %} -%include "haka/lua/wchar.si" %include "haka/lua/swig.si" %include "haka/lua/array.si" @@ -48,13 +47,13 @@ enum alert_level { HAKA_ALERT_LOW, HAKA_ALERT_MEDIUM, HAKA_ALERT_HIGH, HAKA_ALER enum alert_completion { HAKA_ALERT_FAILED, HAKA_ALERT_SUCCESSFUL }; enum alert_node_type { HAKA_ALERT_NODE_ADDRESS, HAKA_ALERT_NODE_SERVICE }; -%typemap(in) wchar_t ** { +%typemap(in) char ** { if (lua_istable(L, $input)) { int i, size = lua_rawlen(L, $input); - $1 = malloc((size+1)*sizeof(wchar_t *)); + $1 = malloc((size+1)*sizeof(char *)); for (i = 0; i < size; ++i) { lua_rawgeti(L, $input, i+1); - $1[i] = str2wstr(lua_tostring(L, -1), lua_objlen(L, -1)); + $1[i] = 
strdup(lua_tostring(L, -1)); lua_pop(L, 1); } $1[i] = NULL; @@ -65,30 +64,30 @@ enum alert_node_type { HAKA_ALERT_NODE_ADDRESS, HAKA_ALERT_NODE_SERVICE }; } } -%typemap(typecheck, precedence=SWIG_TYPECHECK_STRING_ARRAY) wchar_t ** { +%typemap(typecheck, precedence=SWIG_TYPECHECK_STRING_ARRAY) char ** { $1 = lua_istable(L, $input); } -%typemap(memberin) wchar_t ** { +%typemap(memberin) char ** { free_array((void **)$1); $1 = $input; $input = NULL; } -%typemap(freearg) wchar_t ** { +%typemap(freearg) char ** { free_array((void**)$1); } struct alert_node { alert_node_type type; - wchar_t **list; + char **list; %extend { alert_node() { struct alert_node *node = malloc(sizeof(struct alert_node)); if (!node) { - error(L"memory error"); + error("memory error"); return NULL; } memset(node, 0, sizeof(struct alert_node)); @@ -122,20 +121,20 @@ APPLY_NULLTERM_ARRAY(alert_id); STRUCT_UNKNOWN_KEY_ERROR(alert_id); struct alert { - wchar_t *description; - wchar_t *method_description; + char *description; + char *method_description; alert_level severity; alert_level confidence; double confidence_num; alert_completion completion; - wchar_t **method_ref; + char **method_ref; %extend { alert() { struct alert *alert = malloc(sizeof(struct alert)); if (!alert) { - error(L"memory error"); + error("memory error"); return NULL; } memset(alert, 0, sizeof(struct alert)); @@ -171,7 +170,7 @@ struct alert { free($self->alert_ref); $self->alert_ref = malloc(size*sizeof(uint64)); if (!$self->alert_ref) { - error(L"memory error"); + error("memory error"); return; } @@ -204,7 +203,7 @@ struct alert_id *_post(struct alert *_alert) { struct alert_id *ret = malloc(sizeof(struct alert_id)); if (!ret) { - error(L"memory error"); + error("memory error"); return NULL; } diff --git a/lib/haka/lua/haka.i b/lib/haka/lua/haka.i index 9d12b0e8..d7411526 100644 --- a/lib/haka/lua/haka.i +++ b/lib/haka/lua/haka.i @@ -41,7 +41,7 @@ struct time { time(double ts) { struct time *t = malloc(sizeof(struct time)); if 
(!t) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -60,7 +60,7 @@ struct time { { *TEMP_OUTPUT = malloc(TIME_BUFSIZE); if (!*TEMP_OUTPUT) { - error(L"memory error"); + error("memory error"); return; } diff --git a/lib/haka/lua/hakainit.i b/lib/haka/lua/hakainit.i index a4077969..370ec3b4 100644 --- a/lib/haka/lua/hakainit.i +++ b/lib/haka/lua/hakainit.i @@ -85,5 +85,5 @@ bool stdout_support_colors(); end package.cpath = addpath(package.cpath, hakainit.module_path(true), { hakainit.module_prefix .. '?' .. hakainit.module_suffix }) - package.path = addpath(package.path, hakainit.module_path(false), { '?.bc', '?.lua' }) + package.path = addpath(package.path, hakainit.module_path(false), { '?.bc', '?.lua', '?/init.lua' }) } diff --git a/lib/haka/lua/log.si b/lib/haka/lua/log.si index 9e1bfbc6..c4128318 100644 --- a/lib/haka/lua/log.si +++ b/lib/haka/lua/log.si @@ -6,7 +6,6 @@ #include %} -%include "haka/lua/wchar.si" %include "haka/lua/swig.si" @@ -20,10 +19,10 @@ enum log_level { HAKA_LOG_FATAL, HAKA_LOG_ERROR, HAKA_LOG_WARNING, HAKA_LOG_INFO, HAKA_LOG_DEBUG, HAKA_LOG_DEFAULT }; %rename(_message) message; -void message(log_level level, const wchar_t *module, const wchar_t *message); +void message(log_level level, const char *module, const char *message); %rename(_setloglevel) setlevel; -void setlevel(log_level level, const wchar_t *module); +void setlevel(log_level level, const char *module); %luacode { -- Hide internals diff --git a/lib/haka/lua/lua.c b/lib/haka/lua/lua.c index 0246d402..4b929de9 100644 --- a/lib/haka/lua/lua.c +++ b/lib/haka/lua/lua.c @@ -45,13 +45,13 @@ bool lua_pushwstring(struct lua_State *L, const wchar_t *str) size = wcstombs(NULL, str, 0); if (size == (size_t)-1) { - error(L"unknown error"); + error("unknown error"); return false; } strmb = malloc(size+1); if (!strmb) { - error(L"memory error"); + error("memory error"); return false; } diff --git a/lib/haka/lua/marshal.c b/lib/haka/lua/marshal.c index 
920d47fa..d01e605a 100644 --- a/lib/haka/lua/marshal.c +++ b/lib/haka/lua/marshal.c @@ -31,21 +31,21 @@ char *lua_marshal(struct lua_State *L, int index, size_t *len) lua_pushcfunction(L, mar_encode); lua_pushvalue(L, index); if (lua_pcall(L, 1, 1, h)) { - error(L"%s", lua_tostring(L, -1)); + error("%s", lua_tostring(L, -1)); } else { if (lua_isstring(L, -1)) { const char *ptr = lua_tolstring(L, h+1, len); ret = malloc(*len); if (!ret) { - error(L"memory error"); + error("memory error"); } else { memcpy(ret, ptr, *len); } } else { - error(L"marshaling error"); + error("marshaling error"); } } @@ -66,7 +66,7 @@ bool lua_unmarshal(struct lua_State *L, const char *data, size_t len) lua_pushcfunction(L, mar_decode); lua_pushlstring(L, data, len); if (lua_pcall(L, 1, 1, h)) { - error(L"%s", lua_tostring(L, -1)); + error("%s", lua_tostring(L, -1)); } lua_remove(L, h); diff --git a/lib/haka/lua/packet.si b/lib/haka/lua/packet.si index 9c3835b4..adaa42b3 100644 --- a/lib/haka/lua/packet.si +++ b/lib/haka/lua/packet.si @@ -10,6 +10,7 @@ #include #include #include +#include bool lua_pushppacket(lua_State *L, struct packet *pkt) { @@ -17,6 +18,8 @@ bool lua_pushppacket(lua_State *L, struct packet *pkt) } %} +%include "haka/lua/ref.si" + %nodefaultctor; %nodefaultdtor; @@ -25,6 +28,7 @@ bool lua_pushppacket(lua_State *L, struct packet *pkt) struct packet { %extend { %immutable; + struct lua_ref data; const struct time *timestamp { return packet_timestamp($self); } struct vbuffer *payload { return packet_payload($self); } int id { return packet_id($self); } @@ -53,7 +57,7 @@ struct packet { break; case STATUS_SENT: - error(L"operation not supported"); + error("operation not supported"); return; default: @@ -74,7 +78,7 @@ struct packet { case STATUS_NORMAL: case STATUS_SENT: - error(L"operation not supported"); + error("operation not supported"); return; default: @@ -105,11 +109,28 @@ struct packet { } }; +%{ + +#define packet_data_get(pkt) _packet_data_get(L, pkt) + +struct 
lua_ref _packet_data_get(lua_State *L, struct packet *packet) +{ + if (!lua_ref_isvalid(&packet->userdata)) { + lua_newtable(L); + lua_ref_get(L, &packet->userdata, -1); + lua_pop(L, 1); + } + + return packet->userdata; +} + +%} + %rename(packet_mode) _packet_mode; const char *_packet_mode(); %{ - const char *_packet_mode() + static const char *_packet_mode() { enum packet_mode mode = packet_mode(); switch (mode) { @@ -119,3 +140,13 @@ const char *_packet_mode(); } } %} + +%rename(network_time) _network_time; +const struct time *_network_time(); + +%{ + static const struct time *_network_time() + { + return time_realm_current_time(&network_time); + } +%} diff --git a/lib/haka/lua/ref.c b/lib/haka/lua/ref.c index 51c6d49b..12c388af 100644 --- a/lib/haka/lua/ref.c +++ b/lib/haka/lua/ref.c @@ -25,12 +25,13 @@ bool lua_ref_isvalid(struct lua_ref *ref) return (ref->state && ref->ref != LUA_NOREF); } -void lua_ref_get(struct lua_State *state, struct lua_ref *ref) +void lua_ref_get(struct lua_State *state, struct lua_ref *ref, int index) { lua_ref_clear(ref); - if (!lua_isnil(state, -1)) { + if (!lua_isnil(state, index)) { ref->state = lua_state_get(state); + lua_pushvalue(state, index); ref->ref = luaL_ref(state, LUA_REGISTRYINDEX); } } diff --git a/lib/haka/lua/regexp.si b/lib/haka/lua/regexp.si index 1aee87c4..42efa3c6 100644 --- a/lib/haka/lua/regexp.si +++ b/lib/haka/lua/regexp.si @@ -11,7 +11,7 @@ static char *escape_chars(const char *STRING, size_t SIZE) { char *str; str = malloc(SIZE + 1); if (!str) { - error(L"memory error"); + error("memory error"); return NULL; } while (iter < SIZE) { @@ -40,7 +40,7 @@ struct regexp_result { regexp_result() { struct regexp_result *ret = malloc(sizeof(struct regexp_result)); if (!ret) { - error(L"memory error"); + error("memory error"); return NULL; } *ret = regexp_result_init; @@ -117,7 +117,7 @@ struct regexp { *TEMP_SIZE = result.last - result.first; *TEMP_OUTPUT = malloc(*TEMP_SIZE); if (!*TEMP_OUTPUT) { - error(L"memory 
error"); + error("memory error"); return; } @@ -137,7 +137,7 @@ struct regexp { if (ret != REGEXP_MATCH) return NULL; result = malloc(sizeof(struct vbuffer_sub)); - if (!result) error(L"memory error"); + if (!result) error("memory error"); *result = tmp_result; vbuffer_sub_register(result); @@ -175,7 +175,7 @@ struct regexp_module { *TEMP_SIZE = result.last - result.first; *TEMP_OUTPUT = malloc(*TEMP_SIZE); if (!*TEMP_OUTPUT) { - error(L"memory error"); + error("memory error"); return; } @@ -198,7 +198,7 @@ struct regexp_module { if (ret != REGEXP_MATCH) return NULL; result = malloc(sizeof(struct vbuffer_sub)); - if (!result) error(L"memory error"); + if (!result) error("memory error"); *result = tmp_result; vbuffer_sub_register(result); @@ -207,7 +207,7 @@ struct regexp_module { struct regexp *compile(const char *pattern, int options = 0) { if (!pattern) { - error(L"nil argument"); + error("nil argument"); return NULL; } char *esc_regexp = escape_chars(pattern, strlen(pattern)); diff --git a/lib/haka/lua/state.c b/lib/haka/lua/state.c index 571cc246..84ec3f30 100644 --- a/lib/haka/lua/state.c +++ b/lib/haka/lua/state.c @@ -46,17 +46,17 @@ static void lua_dispatcher_hook(lua_State *L, lua_Debug *ar); static int panic(lua_State *L) { - messagef(HAKA_LOG_FATAL, L"lua", L"lua panic: %s", lua_tostring(L, -1)); + messagef(HAKA_LOG_FATAL, "lua", "lua panic: %s", lua_tostring(L, -1)); raise(SIGQUIT); return 0; } -void lua_state_print_error(struct lua_State *L, const wchar_t *msg) +void lua_state_print_error(struct lua_State *L, const char *msg) { if (msg) - messagef(HAKA_LOG_ERROR, L"lua", L"%ls: %s", msg, lua_tostring(L, -1)); + messagef(HAKA_LOG_ERROR, "lua", "%s: %s", msg, lua_tostring(L, -1)); else - messagef(HAKA_LOG_ERROR, L"lua", L"%s", lua_tostring(L, -1)); + messagef(HAKA_LOG_ERROR, "lua", "%s", lua_tostring(L, -1)); lua_pop(L, 1); } @@ -69,7 +69,7 @@ int lua_state_error_formater(lua_State *L) lua_state_error_hook(L); } - if (getlevel(L"lua") >= HAKA_LOG_DEBUG) { 
+ if (getlevel("lua") >= HAKA_LOG_DEBUG) { if (!lua_isstring(L, -1)) { return 0; } @@ -424,11 +424,11 @@ void lua_state_trigger_haka_event(struct lua_state *state, const char *event) lua_pushnil(state->L); /* emitter */ lua_getfield(state->L, -5, event); /* event */ if (lua_isnil(state->L, -1)) { - messagef(HAKA_LOG_ERROR, L"lua", L"invalid haka event: %s", event); + messagef(HAKA_LOG_ERROR, "lua", "invalid haka event: %s", event); } else { if (lua_pcall(state->L, 3, 0, h)) { - lua_state_print_error(state->L, L"lua"); + lua_state_print_error(state->L, "lua"); } } @@ -512,7 +512,7 @@ static void lua_interrupt_call(struct lua_state_ext *state) if (lua_pcall(state->state.L, func->data ? 1 : 0, 0, h)) { if (!lua_isnil(state->state.L, -1)) { - lua_state_print_error(state->state.L, L"lua"); + lua_state_print_error(state->state.L, "lua"); } else { lua_pop(state->state.L, 1); @@ -574,7 +574,7 @@ bool lua_state_interrupt(struct lua_state *_state, lua_function func, void *data struct lua_interrupt_data *func_data; if (!lua_state_isvalid(&state->state)) { - error(L"invalid lua state"); + error("invalid lua state"); return false; } diff --git a/lib/haka/lua/state_machine.si b/lib/haka/lua/state_machine.si index bf6ccba6..5add4124 100644 --- a/lib/haka/lua/state_machine.si +++ b/lib/haka/lua/state_machine.si @@ -54,7 +54,7 @@ struct state *lua_transition_callback(struct state_machine_instance *state_machi newstate = state_machine_finish_state; } else { - lua_state_print_error(L, L"state machine"); + lua_state_print_error(L, "state machine"); newstate = state_machine_fail_state; } } @@ -65,14 +65,14 @@ struct state *lua_transition_callback(struct state_machine_instance *state_machi } else { if (!lua_istable(L, -1)) { - message(HAKA_LOG_ERROR, L"state machine", L"transition failed, invalid state"); + message(HAKA_LOG_ERROR, "state machine", "transition failed, invalid state"); } else { lua_getfield(L, -1, "_state"); assert(!lua_isnil(L, -1)); if (!SWIG_IsOK(SWIG_ConvertPtr(L, -1, 
(void**)&newstate, SWIGTYPE_p_state, 0))) { - message(HAKA_LOG_ERROR, L"state machine", L"transition failed, invalid state"); + message(HAKA_LOG_ERROR, "state machine", "transition failed, invalid state"); } lua_pop(L, 2); @@ -100,7 +100,7 @@ static struct transition_data *lua_transition_data_new(struct lua_ref *func) { struct lua_transition_data *ret = malloc(sizeof(struct lua_transition_data)); if (!ret) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -189,7 +189,7 @@ struct state_machine { state_machine(const char *name) { if (!name) { - error(L"missing name parameter"); + error("missing name parameter"); return NULL; } @@ -217,7 +217,7 @@ struct state_machine { { struct lua_state_machine_context *context = malloc(sizeof(struct lua_state_machine_context)); if (!context) { - error(L"memory error"); + error("memory error"); return NULL; } diff --git a/lib/haka/lua/vbuffer.si b/lib/haka/lua/vbuffer.si index 4eeaea34..a6192aae 100644 --- a/lib/haka/lua/vbuffer.si +++ b/lib/haka/lua/vbuffer.si @@ -28,7 +28,7 @@ struct vbuffer_iterator *vbuffer_iterator_lua_allocate(struct vbuffer_iterator * { struct vbuffer_iterator *ret = malloc(sizeof(struct vbuffer_iterator)); if (!iter) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -59,18 +59,18 @@ struct vbuffer_iterator { struct vbuffer_sub *sub; if (!data) { - error(L"missing data parameter"); + error("missing data parameter"); return; } if (!vbuffer_iterator_isinsertable($self, data)) { - error(L"circular buffer insertion"); + error("circular buffer insertion"); return; } sub = malloc(sizeof(struct vbuffer_sub)); if (!sub) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -87,12 +87,12 @@ struct vbuffer_iterator { void _restore(struct vbuffer *data, bool clone=false) { if (!data) { - error(L"missing data parameter"); + error("missing data parameter"); return; } if (!vbuffer_iterator_isinsertable($self, data)) { - error(L"circular buffer insertion"); + 
error("circular buffer insertion"); return; } @@ -122,7 +122,7 @@ struct vbuffer_iterator { struct vbuffer_sub *sub = malloc(sizeof(struct vbuffer_sub)); if (!sub) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -139,7 +139,7 @@ struct vbuffer_iterator { struct vbuffer_sub *sub(const char *mode, bool split = false) { if (!mode) { - error(L"missing mode parameter"); + error("missing mode parameter"); return NULL; } @@ -151,7 +151,7 @@ struct vbuffer_iterator { return vbuffer_iterator_sub__SWIG_0($self, -1, split); } else { - error(L"unknown sub buffer mode: %s", mode); + error("unknown sub buffer mode: %s", mode); return NULL; } } @@ -159,7 +159,7 @@ struct vbuffer_iterator { void move_to(struct vbuffer_iterator *iter) { if (!iter) { - error(L"invalid source iterator"); + error("invalid source iterator"); return; } @@ -208,7 +208,7 @@ struct vbuffer_iterator_blocking { { struct vbuffer_iterator_blocking *iter = malloc(sizeof(struct vbuffer_iterator_blocking)); if (!iter) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -403,7 +403,7 @@ struct vbuffer_sub { { struct vbuffer_sub *sub = malloc(sizeof(struct vbuffer_sub)); if (!sub) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -447,7 +447,7 @@ struct vbuffer_sub { void replace(struct vbuffer *data) { if (!vbuffer_iterator_isinsertable(&$self->begin, data)) { - error(L"circular buffer insertion"); + error("circular buffer insertion"); return; } @@ -477,7 +477,7 @@ struct vbuffer_sub { if (!select || !ref) { free(select); free(ref); - error(L"memory error"); + error("memory error"); return NULL; } @@ -496,7 +496,7 @@ struct vbuffer_sub { { struct vbuffer_sub *sub = malloc(sizeof(struct vbuffer_sub)); if (!sub) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -512,7 +512,7 @@ struct vbuffer_sub { struct vbuffer_sub *sub(int offset, const char *mode) { if (!mode) { - error(L"missing mode parameter"); + error("missing mode 
parameter"); return NULL; } @@ -520,7 +520,7 @@ struct vbuffer_sub { return vbuffer_sub_sub__SWIG_0($self, offset, -1); } else { - error(L"unknown sub buffer mode: %s", mode); + error("unknown sub buffer mode: %s", mode); return NULL; } } @@ -529,7 +529,7 @@ struct vbuffer_sub { { struct vbuffer_iterator *iter = malloc(sizeof(struct vbuffer_iterator)); if (!iter) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -545,14 +545,14 @@ struct vbuffer_sub { struct vbuffer_iterator *pos(const char *pos) { if (!pos) { - error(L"missing pos parameter"); + error("missing pos parameter"); return NULL; } if (strcmp(pos, "begin") == 0) return vbuffer_sub_pos__SWIG_0($self, 0); else if (strcmp(pos, "end") == 0) return vbuffer_sub_pos__SWIG_0($self, -1); else { - error(L"unknown buffer position: %s", pos); + error("unknown buffer position: %s", pos); return NULL; } } @@ -567,7 +567,7 @@ struct vbuffer_sub { *TEMP_SIZE = vbuffer_sub_size($self); *TEMP_OUTPUT = malloc(*TEMP_SIZE+1); if (!*TEMP_OUTPUT) { - error(L"memory error"); + error("memory error"); return; } @@ -597,7 +597,7 @@ struct vbuffer { { struct vbuffer *buf = malloc(sizeof(struct vbuffer)); if (!buf) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -613,7 +613,7 @@ struct vbuffer { { struct vbuffer *buf = malloc(sizeof(struct vbuffer)); if (!buf) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -653,7 +653,7 @@ struct vbuffer { { struct vbuffer_iterator *iter = malloc(sizeof(struct vbuffer_iterator)); if (!iter) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -665,14 +665,14 @@ struct vbuffer { struct vbuffer_iterator *pos(const char *pos) { if (!pos) { - error(L"missing pos parameter"); + error("missing pos parameter"); return NULL; } if (strcmp(pos, "begin") == 0) return vbuffer_pos__SWIG_0($self, 0); else if (strcmp(pos, "end") == 0) return vbuffer_pos__SWIG_0($self, -1); else { - error(L"unknown buffer position: %s", pos); + 
error("unknown buffer position: %s", pos); return NULL; } } @@ -681,7 +681,7 @@ struct vbuffer { { struct vbuffer_sub *sub = malloc(sizeof(struct vbuffer_sub)); if (!sub) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -693,7 +693,7 @@ struct vbuffer { struct vbuffer_sub *sub(int offset, const char *mode) { if (!mode) { - error(L"missing mode parameter"); + error("missing mode parameter"); return NULL; } @@ -701,7 +701,7 @@ struct vbuffer { return vbuffer_sub__SWIG_0($self, offset, -1); } else { - error(L"unknown sub buffer mode: %s", mode); + error("unknown sub buffer mode: %s", mode); return NULL; } } @@ -715,7 +715,7 @@ struct vbuffer { void _append(struct vbuffer *buffer) { if ($self == buffer) { - error(L"circular buffer insertion"); + error("circular buffer insertion"); return; } @@ -732,11 +732,11 @@ struct vbuffer { else if (strcmp(_mode, "copy") == 0) mode = CLONE_COPY; else if (strcmp(_mode, "ro orig") == 0) mode = CLONE_RO_ORIG; else if (strcmp(_mode, "ro clone") == 0) mode = CLONE_RO_CLONE; - else error(L"invalid clone mode"); + else error("invalid clone mode"); buf = malloc(sizeof(struct vbuffer)); if (!buf) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -765,7 +765,7 @@ struct vbuffer_stream { { struct vbuffer_stream *stream = malloc(sizeof(struct vbuffer_stream)); if (!stream) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -788,7 +788,7 @@ struct vbuffer_stream { { struct vbuffer_iterator *iter = malloc(sizeof(struct vbuffer_iterator)); if (!iter) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -809,7 +809,7 @@ struct vbuffer_stream { { struct vbuffer *buf = malloc(sizeof(struct vbuffer)); if (!buf) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -839,7 +839,7 @@ struct vbuffer_sub_stream { { struct vbuffer_sub_stream *stream = malloc(sizeof(struct vbuffer_sub_stream)); if (!stream) { - error(L"memory error"); + error("memory error"); 
return NULL; } @@ -868,7 +868,7 @@ struct vbuffer_sub_stream { iter = malloc(sizeof(struct vbuffer_iterator)); if (!iter) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -887,7 +887,7 @@ struct vbuffer_sub_stream { { struct vbuffer_sub *sub = malloc(sizeof(struct vbuffer_sub)); if (!sub) { - error(L"memory error"); + error("memory error"); return NULL; } diff --git a/lib/haka/luadebug/complete.c b/lib/haka/luadebug/complete.c index 1b56d1a1..74a97f00 100644 --- a/lib/haka/luadebug/complete.c +++ b/lib/haka/luadebug/complete.c @@ -86,7 +86,7 @@ bool complete_push_table_context(struct lua_State *L, struct luadebug_complete * buffer = malloc(7 + table_size + 1); if (!buffer) { - error(L"memory error"); + error("memory error"); return false; } diff --git a/lib/haka/luadebug/debugger.c b/lib/haka/luadebug/debugger.c index f804354d..4eabeb86 100644 --- a/lib/haka/luadebug/debugger.c +++ b/lib/haka/luadebug/debugger.c @@ -27,7 +27,7 @@ #include "debugger.h" #include "utils.h" -#define MODULE L"debugger" +#define MODULE "debugger" struct luadebug_debugger { @@ -804,7 +804,7 @@ static bool prepare_debugger(struct luadebug_debugger *session) if (!session->user) { mutex_unlock(¤t_user_mutex); - message(HAKA_LOG_ERROR, MODULE, L"no input/output handler"); + message(HAKA_LOG_ERROR, MODULE, "no input/output handler"); on_user_error(session); return false; } @@ -988,7 +988,7 @@ static void luadebug_debugger_activate(struct luadebug_debugger *session) session->active = true; - message(HAKA_LOG_INFO, MODULE, L"lua debugger activated"); + message(HAKA_LOG_INFO, MODULE, "lua debugger activated"); } } @@ -998,7 +998,7 @@ struct luadebug_debugger *luadebug_debugger_create(struct lua_State *L, bool bre lua_getfield(L, LUA_REGISTRYINDEX, "__debugger"); if (!lua_isnil(L, -1)) { - error(L"debugger already attached"); + error("debugger already attached"); lua_pop(L, 1); return NULL; } @@ -1006,7 +1006,7 @@ struct luadebug_debugger *luadebug_debugger_create(struct 
lua_State *L, bool bre ret = malloc(sizeof(struct luadebug_debugger)); if (!ret) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -1054,7 +1054,7 @@ static void luadebug_debugger_deactivate(struct luadebug_debugger *session, bool session->active = false; - message(HAKA_LOG_INFO, MODULE, L"lua debugger deactivated"); + message(HAKA_LOG_INFO, MODULE, "lua debugger deactivated"); } } diff --git a/lib/haka/luadebug/interactive.c b/lib/haka/luadebug/interactive.c index 30e88684..1f9628d1 100644 --- a/lib/haka/luadebug/interactive.c +++ b/lib/haka/luadebug/interactive.c @@ -95,7 +95,7 @@ void luadebug_interactive_enter(struct lua_State *L, const char *single, const c session.user = user; if (!session.user) { - message(HAKA_LOG_ERROR, L"interactive", L"no input/output handler"); + message(HAKA_LOG_ERROR, "interactive", "no input/output handler"); mutex_unlock(¤t_user_mutex); return; } diff --git a/lib/haka/luadebug/user_readline.c b/lib/haka/luadebug/user_readline.c index add046c4..1460922b 100644 --- a/lib/haka/luadebug/user_readline.c +++ b/lib/haka/luadebug/user_readline.c @@ -92,7 +92,7 @@ struct luadebug_user *luadebug_user_readline() { struct luadebug_user *ret = malloc(sizeof(struct luadebug_user)); if (!ret) { - error(L"memory error"); + error("memory error"); return NULL; } diff --git a/lib/haka/luadebug/user_remote.c b/lib/haka/luadebug/user_remote.c index 355333e0..48929103 100644 --- a/lib/haka/luadebug/user_remote.c +++ b/lib/haka/luadebug/user_remote.c @@ -15,7 +15,7 @@ #include -#define MODULE L"remote" +#define MODULE "remote" struct luadebug_remote_user { @@ -86,7 +86,7 @@ static bool write_string(int fd, const char *string) static void report_error(struct luadebug_remote_user *user, int err) { if (!user->error) { - messagef(HAKA_LOG_ERROR, MODULE, L"remote communication error: %s", errno_error(err)); + messagef(HAKA_LOG_ERROR, MODULE, "remote communication error: %s", errno_error(err)); user->error = true; } } @@ -255,7 +255,7 @@ 
struct luadebug_user *luadebug_user_remote(int fd) { struct luadebug_remote_user *ret = malloc(sizeof(struct luadebug_remote_user)); if (!ret) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -315,7 +315,7 @@ static bool luadebug_user_remote_server_session(int fd, struct luadebug_user *us { char *line = read_string(fd); if (!line) { - messagef(HAKA_LOG_ERROR, MODULE, L"remote communication error: %s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, MODULE, "remote communication error: %s", errno_error(errno)); return false; } @@ -328,7 +328,7 @@ static bool luadebug_user_remote_server_session(int fd, struct luadebug_user *us { char *line = read_string(fd); if (!line) { - messagef(HAKA_LOG_ERROR, MODULE, L"remote communication error: %s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, MODULE, "remote communication error: %s", errno_error(errno)); return false; } @@ -341,13 +341,13 @@ static bool luadebug_user_remote_server_session(int fd, struct luadebug_user *us { char *line = read_string(fd); if (!line) { - messagef(HAKA_LOG_ERROR, MODULE, L"remote communication error: %s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, MODULE, "remote communication error: %s", errno_error(errno)); return false; } char *rdline = user->readline(user, line); if (!rdline) { - messagef(HAKA_LOG_ERROR, MODULE, L"remote communication error: %s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, MODULE, "remote communication error: %s", errno_error(errno)); free(line); return false; } @@ -356,7 +356,7 @@ static bool luadebug_user_remote_server_session(int fd, struct luadebug_user *us command = '1'; if (write(fd, &command, 1) != 1) { - messagef(HAKA_LOG_ERROR, MODULE, L"remote communication error: %s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, MODULE, "remote communication error: %s", errno_error(errno)); return false; } write_string(fd, rdline); @@ -370,7 +370,7 @@ static bool luadebug_user_remote_server_session(int fd, struct luadebug_user *us } default: - 
messagef(HAKA_LOG_ERROR, MODULE, L"remote communication error: %s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, MODULE, "remote communication error: %s", errno_error(errno)); return false; } } @@ -387,7 +387,7 @@ void luadebug_user_remote_server(int fd, struct luadebug_user *user) { char *line = read_string(fd); if (!line) { - messagef(HAKA_LOG_ERROR, MODULE, L"remote communication error: %s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, MODULE, "remote communication error: %s", errno_error(errno)); return; } @@ -395,7 +395,7 @@ void luadebug_user_remote_server(int fd, struct luadebug_user *user) free(line); if (!luadebug_user_remote_server_session(fd, user)) { - message(HAKA_LOG_ERROR, MODULE, L"remote communication error"); + message(HAKA_LOG_ERROR, MODULE, "remote communication error"); return; } break; @@ -407,7 +407,7 @@ void luadebug_user_remote_server(int fd, struct luadebug_user *user) } default: - message(HAKA_LOG_ERROR, MODULE, L"remote communication error"); + message(HAKA_LOG_ERROR, MODULE, "remote communication error"); return; } } diff --git a/lib/haka/module.c b/lib/haka/module.c index 9ff4ebe3..ef6c6c7e 100644 --- a/lib/haka/module.c +++ b/lib/haka/module.c @@ -78,13 +78,13 @@ struct module *module_load(const char *module_name, struct parameters *args) if (!module_handle) { free(full_module_name); - error(L"%s", strdup(dlerror())); + error("%s", strdup(dlerror())); return NULL; } module = (struct module*)dlsym(module_handle, "HAKA_MODULE"); if (!module) { - error(L"%s", strdup(dlerror())); + error("%s", strdup(dlerror())); dlclose(module); free(full_module_name); return NULL; @@ -93,7 +93,7 @@ struct module *module_load(const char *module_name, struct parameters *args) module->handle = module_handle; if (module->api_version != HAKA_API_VERSION) { - messagef(HAKA_LOG_INFO, L"core", L"%s: invalid API version", full_module_name); + messagef(HAKA_LOG_INFO, "core", "%s: invalid API version", full_module_name); dlclose(module->handle); 
free(full_module_name); return NULL; @@ -102,23 +102,23 @@ struct module *module_load(const char *module_name, struct parameters *args) if (atomic_get(&module->ref) == 0) { /* Initialize the module */ if (module->name && module->author) { - messagef(HAKA_LOG_INFO, L"core", L"load module '%s', %ls, %ls", + messagef(HAKA_LOG_INFO, "core", "load module '%s', %s, %s", full_module_name, module->name, module->author); } else if (module->name || module->author) { - messagef(HAKA_LOG_INFO, L"core", L"load module '%s', %ls%ls", - full_module_name, module->name ? module->name : L"", - module->author ? module->author : L""); + messagef(HAKA_LOG_INFO, "core", "load module '%s', %s%s", + full_module_name, module->name ? module->name : "", + module->author ? module->author : ""); } else { - messagef(HAKA_LOG_INFO, L"core", L"load module '%s'", full_module_name); + messagef(HAKA_LOG_INFO, "core", "load module '%s'", full_module_name); } if (module->init(args) || check_error()) { if (check_error()) { - error(L"unable to initialize module: %ls", clear_error()); + error("unable to initialize module: %s", clear_error()); } else { - error(L"unable to initialize module"); + error("unable to initialize module"); } dlclose(module->handle); @@ -142,7 +142,7 @@ void module_release(struct module *module) { if (atomic_dec(&module->ref) == 0) { /* Cleanup the module */ - messagef(HAKA_LOG_INFO, L"core", L"unload module '%ls'", module->name); + messagef(HAKA_LOG_INFO, "core", "unload module '%s'", module->name); module->cleanup(); dlclose(module->handle); } @@ -169,7 +169,7 @@ bool module_set_default_path() path = malloc(path_len); if (!path) { - error(L"memory error"); + error("memory error"); return false; } @@ -191,7 +191,7 @@ bool module_set_default_path() path = malloc(path_len); if (!path) { - error(L"memory error"); + error("memory error"); return false; } @@ -210,7 +210,7 @@ void module_set_path(const char *path, bool c) char **old_path = c ? 
&modules_cpath : &modules_path; if (!strchr(path, '*')) { - error(L"invalid module path"); + error("invalid module path"); return; } @@ -219,7 +219,7 @@ void module_set_path(const char *path, bool c) *old_path = strdup(path); if (!*old_path) { - error(L"memory error"); + error("memory error"); return; } } @@ -231,14 +231,14 @@ void module_add_path(const char *path, bool c) const int modules_path_len = *old_path ? strlen(*old_path) : 0; if (!strchr(path, '*')) { - error(L"invalid module path"); + error("invalid module path"); return; } if (modules_path_len > 0) { new_modules_path = malloc(modules_path_len + strlen(path) + 2); if (!new_modules_path) { - error(L"memory error"); + error("memory error"); return; } diff --git a/lib/haka/packet.c b/lib/haka/packet.c index 83ac6dbe..b61115cd 100644 --- a/lib/haka/packet.c +++ b/lib/haka/packet.c @@ -44,7 +44,7 @@ int set_packet_module(struct module *module) struct packet_module *prev_packet_module = packet_module; if (module && module->type != MODULE_PACKET) { - error(L"'%ls' is not a packet module", module->name); + error("'%s' is not a packet module", module->name); return 1; } @@ -119,9 +119,10 @@ int packet_receive(struct packet **pkt) if (!ret && *pkt) { (*pkt)->lua_object = lua_object_init; + lua_ref_init(&(*pkt)->userdata); atomic_set(&(*pkt)->ref, 1); assert(vbuffer_isvalid(&(*pkt)->payload)); - messagef(HAKA_LOG_DEBUG, L"packet", L"received packet id=%lli", + messagef(HAKA_LOG_DEBUG, "packet", "received packet id=%lli", packet_module->get_id(*pkt)); if (!packet_module->is_realtime()) { @@ -147,7 +148,7 @@ void packet_drop(struct packet *pkt) { assert(packet_module); assert(pkt); - messagef(HAKA_LOG_DEBUG, L"packet", L"dropping packet id=%lli", + messagef(HAKA_LOG_DEBUG, "packet", "dropping packet id=%lli", packet_module->get_id(pkt)); packet_module->verdict(pkt, FILTER_DROP); @@ -163,7 +164,7 @@ void packet_accept(struct packet *pkt) assert(packet_module); assert(pkt); - messagef(HAKA_LOG_DEBUG, L"packet", 
L"accepting packet id=%lli", + messagef(HAKA_LOG_DEBUG, "packet", "accepting packet id=%lli", packet_module->get_id(pkt)); { @@ -188,6 +189,7 @@ bool packet_release(struct packet *pkt) assert(packet_module); assert(pkt); if (atomic_dec(&pkt->ref) == 0) { + lua_ref_clear(&pkt->userdata); lua_object_release(pkt, &pkt->lua_object); packet_module->release_packet(pkt); return true; @@ -210,6 +212,7 @@ struct packet *packet_new(size_t size) } pkt->lua_object = lua_object_init; + lua_ref_init(&pkt->userdata); atomic_set(&pkt->ref, 1); assert(vbuffer_isvalid(&pkt->payload)); @@ -227,7 +230,7 @@ bool packet_send(struct packet *pkt) case STATUS_NORMAL: case STATUS_SENT: - error(L"operation not supported (packet captured)"); + error("operation not supported (packet captured)"); return false; default: @@ -235,7 +238,7 @@ bool packet_send(struct packet *pkt) return false; } - messagef(HAKA_LOG_DEBUG, L"packet", L"sending packet id=%lli", + messagef(HAKA_LOG_DEBUG, "packet", "sending packet id=%lli", packet_module->get_id(pkt)); { diff --git a/lib/haka/parameters.c b/lib/haka/parameters.c index b71db5ec..8740a7d0 100644 --- a/lib/haka/parameters.c +++ b/lib/haka/parameters.c @@ -28,13 +28,13 @@ struct parameters *parameters_open(const char *file) ret = malloc(sizeof(struct parameters)); if (!ret) { - error(L"memory error"); + error("memory error"); return NULL; } ret->iniparser_dict = iniparser_load(file); if (!ret->iniparser_dict) { - error(L"configuration file parsing error"); + error("configuration file parsing error"); free(ret->iniparser_dict); return NULL; } @@ -50,13 +50,13 @@ struct parameters *parameters_create() ret = malloc(sizeof(struct parameters)); if (!ret) { - error(L"memory error"); + error("memory error"); return NULL; } ret->iniparser_dict = dictionary_new(0); if (!ret->iniparser_dict) { - error(L"memory error"); + error("memory error"); free(ret->iniparser_dict); return NULL; } @@ -77,7 +77,7 @@ void parameters_free(struct parameters *params) int 
parameters_open_section(struct parameters *params, const char *section) { if (strlen(section) >= MAX_SECTION_LEN) { - error(L"Section name is too long"); + error("Section name is too long"); return 1; } @@ -100,7 +100,7 @@ int parameters_close_section(struct parameters *params) static bool parameters_check_key(struct parameters *params, const char *key) { if (strlen(key) >= MAX_KEY_LEN) { - error(L"Key is too long"); + error("Key is too long"); return false; } diff --git a/lib/haka/regexp_module.c b/lib/haka/regexp_module.c index da860692..34a1f155 100644 --- a/lib/haka/regexp_module.c +++ b/lib/haka/regexp_module.c @@ -9,7 +9,7 @@ struct regexp_module *regexp_module_load(const char *module_name, struct paramet struct module *module = module_load(module_name, args); if (module == NULL || module->type != MODULE_REGEXP) { if (module != NULL) module_release(module); - error(L"Module %s is not of type MODULE_REGEXP", module_name); + error("Module %s is not of type MODULE_REGEXP", module_name); return NULL; } diff --git a/lib/haka/state_machine.c b/lib/haka/state_machine.c index cf7658a6..a8284efb 100644 --- a/lib/haka/state_machine.c +++ b/lib/haka/state_machine.c @@ -14,7 +14,7 @@ #include -#define MODULE L"state-machine" +#define MODULE "state-machine" /* @@ -111,7 +111,7 @@ struct state *state_machine_create_state(struct state_machine *state_machine, co { struct state *state = malloc(sizeof(struct state)); if (!state) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -120,7 +120,7 @@ struct state *state_machine_create_state(struct state_machine *state_machine, co if (name) { state->name = strdup(name); if (!state->name) { - error(L"memory error"); + error("memory error"); free(state); return NULL; } @@ -197,7 +197,7 @@ bool state_set_finish_transition(struct state *state, struct transition_data *da } static struct state _state_machine_fail_state = { - name: "FAIL", + name: "FAI", fail: {0}, enter: {0}, leave: {0}, @@ -236,13 +236,13 @@ struct 
state_machine *state_machine_create(const char *name) machine = malloc(sizeof(struct state_machine)); if (!machine) { - error(L"memory error"); + error("memory error"); return NULL; } machine->name = strdup(name); if (!machine->name) { - error(L"memory error"); + error("memory error"); free(machine); return NULL; } @@ -280,7 +280,7 @@ bool state_machine_compile(struct state_machine *machine) } if (!machine->initial) { - error(L"%s: no initial state", machine->name); + error("%s: no initial state", machine->name); return false; } @@ -330,7 +330,7 @@ static struct state *state_machine_leave_state(struct state_machine_instance *in } if (have_transition(instance, &instance->current->leave)) { - messagef(HAKA_LOG_DEBUG, MODULE, L"%s: leave transition on state '%s'", + messagef(HAKA_LOG_DEBUG, MODULE, "%s: leave transition on state '%s'", instance->state_machine->name, instance->current->name); newstate = do_transition(instance, &instance->current->leave); @@ -355,7 +355,7 @@ static void transition_timeout(int count, void *_data) trans = vector_get(&data->instance->current->timeouts, struct transition, data->timer_index); assert(trans); - messagef(HAKA_LOG_DEBUG, MODULE, L"%s: timeout trigger on state '%s'", + messagef(HAKA_LOG_DEBUG, MODULE, "%s: timeout trigger on state '%s'", data->instance->state_machine->name, data->instance->current->name); newstate = do_transition(data->instance, trans); @@ -384,7 +384,7 @@ static void state_machine_enter_state(struct state_machine_instance *instance, s instance->current = state; if (have_transition(instance, &instance->current->enter)) { - messagef(HAKA_LOG_DEBUG, MODULE, L"%s: enter transition on state '%s'", + messagef(HAKA_LOG_DEBUG, MODULE, "%s: enter transition on state '%s'", instance->state_machine->name, instance->current->name); newstate = do_transition(instance, &instance->current->enter); @@ -453,7 +453,7 @@ struct state_machine_instance *state_machine_instance(struct state_machine *stat { struct state_machine_instance 
*instance = malloc(sizeof(struct state_machine_instance)); if (!instance) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -467,7 +467,7 @@ struct state_machine_instance *state_machine_instance(struct state_machine *stat instance->failed = false; instance->in_failure = false; - messagef(HAKA_LOG_DEBUG, MODULE, L"%s: initial state '%s'", + messagef(HAKA_LOG_DEBUG, MODULE, "%s: initial state '%s'", instance->state_machine->name, state_machine->initial->name); return instance; @@ -483,7 +483,7 @@ void state_machine_instance_init(struct state_machine_instance *instance) instance->in_failure = false; if (have_transition(instance, &instance->current->init)) { - messagef(HAKA_LOG_DEBUG, MODULE, L"%s: init transition on state '%s'", + messagef(HAKA_LOG_DEBUG, MODULE, "%s: init transition on state '%s'", instance->state_machine->name, instance->current->name); do_transition(instance, &instance->current->init); @@ -493,7 +493,7 @@ void state_machine_instance_init(struct state_machine_instance *instance) void state_machine_instance_finish(struct state_machine_instance *instance) { if (instance->finished) { - error(L"state machine instance has finished"); + error("state machine instance has finished"); return; } @@ -502,11 +502,11 @@ void state_machine_instance_finish(struct state_machine_instance *instance) state_machine_leave_state(instance); - messagef(HAKA_LOG_DEBUG, MODULE, L"%s: finish from state '%s'", + messagef(HAKA_LOG_DEBUG, MODULE, "%s: finish from state '%s'", instance->state_machine->name, current->name); if (have_transition(instance, ¤t->finish)) { - messagef(HAKA_LOG_DEBUG, MODULE, L"%s: finish transition on state '%s'", + messagef(HAKA_LOG_DEBUG, MODULE, "%s: finish transition on state '%s'", instance->state_machine->name, current->name); do_transition(instance, ¤t->finish); @@ -535,7 +535,7 @@ void state_machine_instance_update(struct state_machine_instance *instance, stru assert(newstate); if (instance->finished) { - error(L"state 
machine instance has finished"); + error("state machine instance has finished"); return; } @@ -547,11 +547,11 @@ void state_machine_instance_update(struct state_machine_instance *instance, stru } else { if (instance->current) { - messagef(HAKA_LOG_DEBUG, MODULE, L"%s: transition from state '%s' to state '%s'", + messagef(HAKA_LOG_DEBUG, MODULE, "%s: transition from state '%s' to state '%s'", instance->state_machine->name, instance->current->name, newstate->name); } else { - messagef(HAKA_LOG_DEBUG, MODULE, L"%s: transition to state '%s'", + messagef(HAKA_LOG_DEBUG, MODULE, "%s: transition to state '%s'", instance->state_machine->name, newstate->name); } @@ -567,7 +567,7 @@ static void _state_machine_instance_transition(struct state_machine_instance *in if (instance->current) { if (trans->callback) { if (trans->callback->callback) { - messagef(HAKA_LOG_DEBUG, MODULE, L"%s: %s transition on state '%s'", + messagef(HAKA_LOG_DEBUG, MODULE, "%s: %s transition on state '%s'", instance->state_machine->name, type, instance->current->name); newstate = trans->callback->callback(instance, trans->callback); @@ -586,7 +586,7 @@ void state_machine_instance_fail(struct state_machine_instance *instance) } if (instance->finished) { - error(L"state machine instance has finished"); + error("state machine instance has finished"); return; } diff --git a/lib/haka/system.c b/lib/haka/system.c index 4e28e2e2..c1c93400 100644 --- a/lib/haka/system.c +++ b/lib/haka/system.c @@ -50,7 +50,7 @@ INIT static void system_init() sigaction(SIGILL, &sa, NULL) || sigaction(SIGFPE, &sa, NULL) || sigaction(SIGABRT, &sa, NULL)) { - messagef(HAKA_LOG_FATAL, L"core", L"%s", errno_error(errno)); + messagef(HAKA_LOG_FATAL, "core", "%s", errno_error(errno)); abort(); } } @@ -64,7 +64,7 @@ bool system_register_fatal_cleanup(void (*callback)()) { void (**func)() = (void (**)()) vector_push(&fatal_cleanup, void *); if (!func) { - error(L"memory error"); + error("memory error"); return false; } diff --git 
a/lib/haka/test/regexp.c b/lib/haka/test/regexp.c index 8ffdd203..0be3ed3d 100644 --- a/lib/haka/test/regexp.c +++ b/lib/haka/test/regexp.c @@ -51,9 +51,9 @@ START_TEST(regexp_module_load_should_return_null_if_module_is_not_MODULE_REGEXP) // Then ck_assert(module == NULL); - const wchar_t *error = clear_error(); - ck_assert_msg(wcscmp(error, L"Module protocol/ipv4 is not of type MODULE_REGEXP") == 0, - "Was expecting 'Module protocol/ipv4 is not of type MODULE_REGEXP', but found '%ls'", error); + const char *error = clear_error(); + ck_assert_msg(strcmp(error, "Module protocol/ipv4 is not of type MODULE_REGEXP") == 0, + "Was expecting 'Module protocol/ipv4 is not of type MODULE_REGEXP', but found '%s'", error); } END_TEST @@ -96,9 +96,9 @@ START_TEST(regexp_compile_should_should_fail_with_module_error) // Then ck_assert(re == NULL); - const wchar_t *error = clear_error(); - ck_assert_msg(wcsncmp(error, L"PCRE compilation failed with error '", 36) == 0, - "Was expecting 'PCRE compilation failed with error '...', but found '%ls'", error); + const char *error = clear_error(); + ck_assert_msg(strncmp(error, "PCRE compilation failed with error '", 36) == 0, + "Was expecting 'PCRE compilation failed with error '...', but found '%s'", error); // Finally rem->release_regexp(re); diff --git a/lib/haka/thread.c b/lib/haka/thread.c index 740587a1..0144c6e9 100644 --- a/lib/haka/thread.c +++ b/lib/haka/thread.c @@ -60,7 +60,7 @@ bool thread_create(thread_t *thread, void *(*main)(void*), void *param) { const int err = pthread_create(thread, NULL, main, param); if (err) { - error(L"thread creation error: %s", errno_error(err)); + error("thread creation error: %s", errno_error(err)); return false; } return true; @@ -70,7 +70,7 @@ bool thread_join(thread_t thread, void **ret) { const int err = pthread_join(thread, ret); if (err) { - error(L"thread join error: %s", errno_error(err)); + error("thread join error: %s", errno_error(err)); return false; } return true; @@ -80,7 +80,7 @@ 
bool thread_cancel(thread_t thread) { const int err = pthread_cancel(thread); if (err) { - error(L"thread cancel error: %s", errno_error(err)); + error("thread cancel error: %s", errno_error(err)); return false; } return true; @@ -95,7 +95,7 @@ bool thread_signal(thread_t thread, int sig) { const int err = pthread_kill(thread, sig); if (err) { - error(L"thread sigmask error: %s", errno_error(err)); + error("thread sigmask error: %s", errno_error(err)); return false; } return true; @@ -105,7 +105,7 @@ bool thread_sigmask(int how, sigset_t *set, sigset_t *oldset) { const int err = pthread_sigmask(how, set, oldset); if (err) { - error(L"thread sigmask error: %s", errno_error(err)); + error("thread sigmask error: %s", errno_error(err)); return false; } return true; @@ -118,11 +118,11 @@ bool thread_setcanceltype(enum thread_cancel_t type) switch (type) { case THREAD_CANCEL_DEFERRED: err = pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL); break; case THREAD_CANCEL_ASYNCHRONOUS: err = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); break; - default: error(L"invalid thread cancel mode"); return false; + default: error("invalid thread cancel mode"); return false; } if (err) { - error(L"thread cancel type error: %s", errno_error(err)); + error("thread cancel type error: %s", errno_error(err)); return false; } @@ -133,7 +133,7 @@ bool thread_setcancelstate(bool enable) { const int err = pthread_setcancelstate(enable ? 
PTHREAD_CANCEL_ENABLE : PTHREAD_CANCEL_DISABLE, NULL); if (err) { - error(L"thread set cancel state error: %s", errno_error(err)); + error("thread set cancel state error: %s", errno_error(err)); return false; } return true; @@ -170,7 +170,7 @@ bool thread_kill(thread_t thread, int sig) { const int err = pthread_kill(thread, sig); if (err) { - error(L"thread error: %s", errno_error(err)); + error("thread error: %s", errno_error(err)); return false; } return true; @@ -232,7 +232,7 @@ bool mutex_init(mutex_t *mutex, bool recursive) err = pthread_mutexattr_init(&attr); if (err) { - error(L"mutex error: %s", errno_error(err)); + error("mutex error: %s", errno_error(err)); return false; } @@ -242,14 +242,14 @@ bool mutex_init(mutex_t *mutex, bool recursive) err = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL); if (err) { - error(L"mutex error: %s", errno_error(err)); + error("mutex error: %s", errno_error(err)); return false; } err = pthread_mutex_init(mutex, &attr); pthread_mutexattr_destroy(&attr); if (err) { - error(L"mutex error: %s", errno_error(err)); + error("mutex error: %s", errno_error(err)); return false; } return true; @@ -259,7 +259,7 @@ bool mutex_destroy(mutex_t *mutex) { const int err = pthread_mutex_destroy(mutex); if (err) { - error(L"mutex error: %s", errno_error(err)); + error("mutex error: %s", errno_error(err)); return false; } return true; @@ -269,7 +269,7 @@ bool mutex_lock(mutex_t *mutex) { const int err = pthread_mutex_lock(mutex); if (err) { - error(L"mutex error: %s", errno_error(err)); + error("mutex error: %s", errno_error(err)); return false; } return true; @@ -281,7 +281,7 @@ bool mutex_trylock(mutex_t *mutex) if (err == 0) return true; else if (err == EBUSY) return false; else { - error(L"mutex error: %s", errno_error(err)); + error("mutex error: %s", errno_error(err)); return false; } } @@ -290,7 +290,7 @@ bool mutex_unlock(mutex_t *mutex) { const int err = pthread_mutex_unlock(mutex); if (err) { - error(L"mutex error: %s", 
errno_error(err)); + error("mutex error: %s", errno_error(err)); return false; } return true; @@ -305,7 +305,7 @@ bool spinlock_init(spinlock_t *lock) { const int err = pthread_spin_init(lock, PTHREAD_PROCESS_PRIVATE); if (err) { - error(L"spinlock error: %s", errno_error(err)); + error("spinlock error: %s", errno_error(err)); return false; } return true; @@ -315,7 +315,7 @@ bool spinlock_destroy(spinlock_t *lock) { const int err = pthread_spin_destroy(lock); if (err) { - error(L"spinlock error: %s", errno_error(err)); + error("spinlock error: %s", errno_error(err)); return false; } return true; @@ -325,7 +325,7 @@ bool spinlock_lock(spinlock_t *lock) { const int err = pthread_spin_lock(lock); if (err) { - error(L"spinlock error: %s", errno_error(err)); + error("spinlock error: %s", errno_error(err)); return false; } return true; @@ -337,7 +337,7 @@ bool spinlock_trylock(spinlock_t *lock) if (err == 0) return true; else if (err == EBUSY) return false; else { - error(L"spinlock error: %s", errno_error(err)); + error("spinlock error: %s", errno_error(err)); return false; } } @@ -346,7 +346,7 @@ bool spinlock_unlock(spinlock_t *lock) { const int err = pthread_spin_unlock(lock); if (err) { - error(L"spinlock error: %s", errno_error(err)); + error("spinlock error: %s", errno_error(err)); return false; } return true; @@ -361,7 +361,7 @@ bool rwlock_init(rwlock_t *rwlock) { const int err = pthread_rwlock_init(rwlock, NULL); if (err) { - error(L"rwlock error: %s", errno_error(err)); + error("rwlock error: %s", errno_error(err)); return false; } return true; @@ -371,7 +371,7 @@ bool rwlock_destroy(rwlock_t *rwlock) { const int err = pthread_rwlock_destroy(rwlock); if (err) { - error(L"rwlock error: %s", errno_error(err)); + error("rwlock error: %s", errno_error(err)); return false; } return true; @@ -381,7 +381,7 @@ bool rwlock_readlock(rwlock_t *rwlock) { const int err = pthread_rwlock_rdlock(rwlock); if (err) { - error(L"rwlock error: %s", errno_error(err)); + 
error("rwlock error: %s", errno_error(err)); return false; } return true; @@ -393,7 +393,7 @@ bool rwlock_tryreadlock(rwlock_t *rwlock) if (err == 0) return true; else if (err == EBUSY) return false; else { - error(L"rwlock error: %s", errno_error(err)); + error("rwlock error: %s", errno_error(err)); return false; } } @@ -402,7 +402,7 @@ bool rwlock_writelock(rwlock_t *rwlock) { const int err = pthread_rwlock_wrlock(rwlock); if (err) { - error(L"rwlock error: %s", errno_error(err)); + error("rwlock error: %s", errno_error(err)); return false; } return true; @@ -414,7 +414,7 @@ bool rwlock_trywritelock(rwlock_t *rwlock) if (err == 0) return true; else if (err == EBUSY) return false; else { - error(L"rwlock error: %s", errno_error(err)); + error("rwlock error: %s", errno_error(err)); return false; } } @@ -423,7 +423,7 @@ bool rwlock_unlock(rwlock_t *rwlock) { const int err = pthread_rwlock_unlock(rwlock); if (err) { - error(L"rwlock error: %s", errno_error(err)); + error("rwlock error: %s", errno_error(err)); return false; } return true; @@ -438,7 +438,7 @@ bool semaphore_init(semaphore_t *semaphore, uint32 initial) { const int err = sem_init(semaphore, 0, initial); if (err) { - error(L"semaphore error: %s", errno_error(err)); + error("semaphore error: %s", errno_error(err)); return false; } return true; @@ -448,7 +448,7 @@ bool semaphore_destroy(semaphore_t *semaphore) { const int err = sem_destroy(semaphore); if (err) { - error(L"semaphore error: %s", errno_error(err)); + error("semaphore error: %s", errno_error(err)); return false; } return true; @@ -458,7 +458,7 @@ bool semaphore_wait(semaphore_t *semaphore) { const int err = sem_wait(semaphore); if (err) { - error(L"semaphore error: %s", errno_error(err)); + error("semaphore error: %s", errno_error(err)); return false; } return true; @@ -468,7 +468,7 @@ bool semaphore_post(semaphore_t *semaphore) { const int err = sem_post(semaphore); if (err) { - error(L"semaphore error: %s", errno_error(err)); + 
error("semaphore error: %s", errno_error(err)); return false; } return true; @@ -483,7 +483,7 @@ bool barrier_init(barrier_t *barrier, uint32 count) { const int err = pthread_barrier_init(barrier, NULL, count); if (err) { - error(L"barrier error: %s", errno_error(err)); + error("barrier error: %s", errno_error(err)); return false; } return true; @@ -493,7 +493,7 @@ bool barrier_destroy(barrier_t *barrier) { const int err = pthread_barrier_destroy(barrier); if (err) { - error(L"barrier error: %s", errno_error(err)); + error("barrier error: %s", errno_error(err)); return false; } return true; @@ -503,7 +503,7 @@ bool barrier_wait(barrier_t *barrier) { const int err = pthread_barrier_wait(barrier); if (err && err != PTHREAD_BARRIER_SERIAL_THREAD) { - error(L"barrier error: %s", errno_error(err)); + error("barrier error: %s", errno_error(err)); return false; } return true; @@ -518,7 +518,7 @@ bool local_storage_init(local_storage_t *key, void (*destructor)(void *)) { const int err = pthread_key_create(key, destructor); if (err) { - error(L"local storage error: %s", errno_error(err)); + error("local storage error: %s", errno_error(err)); return false; } return true; @@ -528,7 +528,7 @@ bool local_storage_destroy(local_storage_t *key) { const int err = pthread_key_delete(*key); if (err) { - error(L"local storage error: %s", errno_error(err)); + error("local storage error: %s", errno_error(err)); return false; } return true; @@ -543,7 +543,7 @@ bool local_storage_set(local_storage_t *key, const void *value) { const int err = pthread_setspecific(*key, value); if (err) { - error(L"local storage error: %s", errno_error(err)); + error("local storage error: %s", errno_error(err)); return false; } return true; diff --git a/lib/haka/time.c b/lib/haka/time.c index b15bbfca..dc135c56 100644 --- a/lib/haka/time.c +++ b/lib/haka/time.c @@ -27,7 +27,7 @@ bool time_gettimestamp(struct time *t) { struct timespec time; if (clock_gettime(CLOCK_REALTIME, &time)) { - error(L"time error: 
%s", errno_error(errno)); + error("time error: %s", errno_error(errno)); return false; } @@ -119,7 +119,7 @@ bool time_tostring(const struct time *t, char *buffer, size_t len) assert(len >= TIME_BUFSIZE); if (!ctime_r(&t->secs, buffer)) { - error(L"time convertion error"); + error("time convertion error"); return false; } @@ -128,6 +128,21 @@ bool time_tostring(const struct time *t, char *buffer, size_t len) return true; } +bool time_format(const struct time *t, const char *format, char *buffer, size_t len) +{ + size_t size; + struct tm tm; + if (!gmtime_r(&t->secs, &tm)) { + error("time error: %s", errno_error(errno)); + return false; + } + + size = strftime(buffer, len, format, &tm); + buffer[size] = '\0'; + + return true; +} + bool time_isvalid(const struct time *t) { return t->secs != 0 || t->nsecs != 0; diff --git a/lib/haka/timer.c b/lib/haka/timer.c index fd1b6dc6..3b9dbf97 100644 --- a/lib/haka/timer.c +++ b/lib/haka/timer.c @@ -52,7 +52,7 @@ static struct time_realm_state *create_time_realm_state(struct time_realm *realm struct time_realm_state *state = malloc(sizeof(struct time_realm_state)); if (!state) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -67,7 +67,7 @@ static struct time_realm_state *create_time_realm_state(struct time_realm *realm if (timer_create(CLOCK_MONOTONIC, &sev, &state->timer)) { free(state); - error(L"timer creation error: %s", errno_error(errno)); + error("timer creation error: %s", errno_error(errno)); return NULL; } } @@ -118,7 +118,7 @@ bool time_realm_initialize(struct time_realm *realm, enum time_realm_mode mode) break; default: - error(L"invalid timer mode"); + error("invalid timer mode"); return false; } @@ -146,10 +146,10 @@ void time_realm_update(struct time_realm *realm, const struct time *value) sign = time_diff(&difftime, value, &oldtime); if (sign < 0) { - messagef(HAKA_LOG_DEBUG, L"timer", L"static time going backward (ignored)"); + messagef(HAKA_LOG_DEBUG, "timer", "static time going backward 
(ignored)"); } else { - messagef(HAKA_LOG_DEBUG, L"timer", L"static time offset %s%f seconds", sign >= 0? "+" : "-", time_sec(&difftime)); + messagef(HAKA_LOG_DEBUG, "timer", "static time offset %s%f seconds", sign >= 0? "+" : "-", time_sec(&difftime)); realm->time = *value; realm->check_timer = true; @@ -167,7 +167,7 @@ const struct time *time_realm_current_time(struct time_realm *realm) return &realm->time; default: - error(L"invalid timer mode"); + error("invalid timer mode"); return NULL; } } @@ -181,7 +181,7 @@ INIT static void _timer_init() sa.sa_sigaction = timer_handler; sigemptyset(&sa.sa_mask); if (sigaction(SIGALRM, &sa, NULL) == -1) { - messagef(HAKA_LOG_FATAL, L"timer", L"%s", errno_error(errno)); + messagef(HAKA_LOG_FATAL, "timer", "%s", errno_error(errno)); abort(); } @@ -209,7 +209,7 @@ struct timer *time_realm_timer(struct time_realm *realm, timer_callback callback timer = malloc(sizeof(struct timer)); if (!timer) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -258,7 +258,7 @@ static bool time_realm_update_timer_list(struct time_realm_state *state, if (list2_empty(&state->sorted_timer)) { /* stop the timer */ if (timer_settime(state->timer, 0, &ts, NULL) != 0) { - error(L"%s", errno_error(errno)); + error("%s", errno_error(errno)); return false; } } @@ -273,7 +273,7 @@ static bool time_realm_update_timer_list(struct time_realm_state *state, /* stop the timer, the timer will be restarted if needed by the * next call to timer_realm_check(). 
*/ if (timer_settime(state->timer, 0, &ts, NULL) != 0) { - error(L"%s", errno_error(errno)); + error("%s", errno_error(errno)); return false; } } @@ -282,11 +282,11 @@ static bool time_realm_update_timer_list(struct time_realm_state *state, ts.it_value.tv_nsec = diff.nsecs; if (timer_settime(state->timer, 0, &ts, NULL) != 0) { - error(L"%s", errno_error(errno)); + error("%s", errno_error(errno)); return false; } - messagef(HAKA_LOG_DEBUG, L"timer", L"next timer in %f seconds", time_sec(&diff)); + messagef(HAKA_LOG_DEBUG, "timer", "next timer in %f seconds", time_sec(&diff)); } } } @@ -297,7 +297,7 @@ static bool time_realm_update_timer_list(struct time_realm_state *state, struct time current = *time_realm_current_time(state->realm); if (time_diff(&diff, &first->trigger_time, ¤t) > 0) { - messagef(HAKA_LOG_DEBUG, L"timer", L"next timer in %f seconds", time_sec(&diff)); + messagef(HAKA_LOG_DEBUG, "timer", "next timer in %f seconds", time_sec(&diff)); } } } @@ -316,7 +316,7 @@ static bool timer_start(struct timer *timer, struct time *delay, bool repeat) struct time_realm_state *state = get_time_realm_state(timer->realm, true); if (delay->secs == 0 && delay->nsecs == 0) { - error(L"invalid timer delay"); + error("invalid timer delay"); return false; } diff --git a/lib/haka/vbuffer.c b/lib/haka/vbuffer.c index 5409371f..a25c3456 100644 --- a/lib/haka/vbuffer.c +++ b/lib/haka/vbuffer.c @@ -50,7 +50,7 @@ static struct vbuffer_chunk *vbuffer_chunk_create_end(bool writable) { struct vbuffer_chunk *chunk = malloc(sizeof(struct vbuffer_chunk)); if (!chunk) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -89,7 +89,7 @@ struct vbuffer_chunk *vbuffer_chunk_create(struct vbuffer_data *data, size_t off struct vbuffer_chunk *chunk = malloc(sizeof(struct vbuffer_chunk)); if (!chunk) { if (data) data->ops->free(data); - error(L"memory error"); + error("memory error"); return NULL; } @@ -119,7 +119,7 @@ struct vbuffer_chunk *vbuffer_chunk_insert_ctl(struct 
vbuffer_chunk *insert, str chunk = malloc(sizeof(struct vbuffer_chunk)); if (!chunk) { if (data) data->ops->free(data); - error(L"memory error"); + error("memory error"); return NULL; } @@ -144,7 +144,7 @@ struct vbuffer_chunk *vbuffer_chunk_insert_ctl(struct vbuffer_chunk *insert, str INLINE bool vbuffer_chunk_check_writeable(struct vbuffer_chunk *chunk) { if (!chunk->flags.writable) { - error(L"read only buffer"); + error("read only buffer"); return false; } return true; @@ -444,7 +444,7 @@ static bool _vbuffer_iterator_check(const struct vbuffer_iterator *position) assert(position); if (!vbuffer_iterator_isvalid(position)) { - error(L"empty iterator"); + error("empty iterator"); return false; } @@ -453,7 +453,7 @@ static bool _vbuffer_iterator_check(const struct vbuffer_iterator *position) position->offset > position->chunk->size || !position->chunk->list.next || !position->chunk->list.prev) { - error(L"invalid buffer iterator"); + error("invalid buffer iterator"); return false; } } @@ -584,7 +584,7 @@ static bool _vbuffer_iterator_check_available(struct vbuffer_iterator *position, while (!iter->flags.end) { if (enditer && enditer == iter) { if (offset > endoffset) { - error(L"invalid buffer end"); + error("invalid buffer end"); return false; } @@ -953,7 +953,7 @@ bool vbuffer_iterator_unmark(struct vbuffer_iterator *position) mark = vbuffer_data_cast(position->chunk->data, vbuffer_data_ctl_mark); if (!mark) { - error(L"iterator is not a mark"); + error("iterator is not a mark"); return false; } @@ -1215,7 +1215,7 @@ static struct vbuffer_chunk *_vbuffer_sub_iterate(struct vbuffer_sub *data, size if (chunk == end) { if (endoffset < offset) { - error(L"invalid buffer end"); + error("invalid buffer end"); return NULL; } else { @@ -1419,7 +1419,7 @@ bool vbuffer_restore(struct vbuffer_iterator *position, struct vbuffer *data, bo ctl = vbuffer_data_cast(position->chunk->data, vbuffer_data_ctl_select); if (!ctl) { - error(L"invalid restore iterator"); + 
error("invalid restore iterator"); return false; } @@ -1596,7 +1596,7 @@ const uint8 *vbuffer_sub_flatten(struct vbuffer_sub *data, size_t *rsize) bool has_ctl = false; size_t size; if (!_vbuffer_compact(data, &has_ctl, &size) || has_ctl) { - error(L"buffer cannot be flatten"); + error("buffer cannot be flatten"); return NULL; } @@ -1684,7 +1684,7 @@ int64 vbuffer_asnumber(struct vbuffer_sub *data, bool bigendian) } if (length > 8) { - error(L"asnumber: unsupported size %zu", length); + error("asnumber: unsupported size %zu", length); return 0; } @@ -1709,7 +1709,7 @@ int64 vbuffer_asnumber(struct vbuffer_sub *data, bool bigendian) case 4: return bigendian ? SWAP_FROM_BE(int32, *(int32*)ptr) : SWAP_FROM_LE(int32, *(int32*)ptr); case 8: return bigendian ? SWAP_FROM_BE(int64, *(int64*)ptr) : SWAP_FROM_LE(int64, *(int64*)ptr); default: - error(L"asnumber: unsupported size %zu", length); + error("asnumber: unsupported size %zu", length); return 0; } } @@ -1723,13 +1723,13 @@ bool vbuffer_setnumber(struct vbuffer_sub *data, bool bigendian, int64 num) } if (length > 8) { - error(L"setnumber: unsupported size %zu", length); + error("setnumber: unsupported size %zu", length); return false; } if ((num < 0 && (-num & ((1ULL << (length*8))-1)) != -num) || (num >= 0 && (num & ((1ULL << (length*8))-1)) != num)) { - error(L"setnumber: invalid number, value does not fit in %d bytes", length); + error("setnumber: invalid number, value does not fit in %zu bytes", length); return false; } @@ -1747,7 +1747,7 @@ bool vbuffer_setnumber(struct vbuffer_sub *data, bool bigendian, int64 num) case 4: *(int32*)ptr = bigendian ? SWAP_TO_BE(int32, (int32)num) : SWAP_TO_LE(int32, (int32)num); break; case 8: *(int64*)ptr = bigendian ? 
SWAP_TO_BE(int64, num) : SWAP_TO_LE(int64, num); break; default: - error(L"setnumber: unsupported size %zu", length); + error("setnumber: unsupported size %zu", length); return false; } } @@ -1766,7 +1766,7 @@ bool vbuffer_setnumber(struct vbuffer_sub *data, bool bigendian, int64 num) case 4: temp.i32 = bigendian ? SWAP_TO_BE(int32, (int32)num) : SWAP_TO_LE(int32, (int32)num); break; case 8: temp.i64 = bigendian ? SWAP_TO_BE(int64, num) : SWAP_TO_LE(int64, num); break; default: - error(L"setnumber: unsupported size %zu", length); + error("setnumber: unsupported size %zu", length); return false; } @@ -1817,17 +1817,17 @@ int64 vbuffer_asbits(struct vbuffer_sub *data, size_t offset, size_t bits, bool } if (begin >= length) { - error(L"asbits: invalid bit offset"); + error("asbits: invalid bit offset"); return -1; } if (end > length) { - error(L"asbits: invalid bit size"); + error("asbits: invalid bit size"); return -1; } if (end > 8) { - error(L"asbits: unsupported size"); + error("asbits: unsupported size"); return -1; } @@ -1854,13 +1854,13 @@ int64 vbuffer_asbits(struct vbuffer_sub *data, size_t offset, size_t bits, bool bool vbuffer_setbits(struct vbuffer_sub *data, size_t offset, size_t bits, bool bigendian, int64 num) { if (bits > 64) { - error(L"setbits: unsupported size %zu", bits); + error("setbits: unsupported size %zu", bits); return false; } if ((num < 0 && (-num & ((1ULL << bits)-1)) != -num) || (num >= 0 && (num & ((1ULL << bits)-1)) != num)) { - error(L"setbits: invalid number, value does not fit in %d bits", bits); + error("setbits: invalid number, value does not fit in %zd bits", bits); return false; } @@ -1878,17 +1878,17 @@ bool vbuffer_setbits(struct vbuffer_sub *data, size_t offset, size_t bits, bool } if (begin >= length) { - error(L"setbits: invalid bit offset"); + error("setbits: invalid bit offset"); return -1; } if (end > length) { - error(L"setbits: invalid bit size"); + error("setbits: invalid bit size"); return -1; } if (end > 8) { - 
error(L"setbits: unsupported size"); + error("setbits: unsupported size"); return false; } diff --git a/lib/haka/vbuffer_data.c b/lib/haka/vbuffer_data.c index b11df99c..67eec7b9 100644 --- a/lib/haka/vbuffer_data.c +++ b/lib/haka/vbuffer_data.c @@ -63,7 +63,7 @@ struct vbuffer_data_basic *vbuffer_data_basic(size_t size, bool zero) { struct vbuffer_data_basic *buf = malloc(sizeof(struct vbuffer_data_basic) + size); if (!buf) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -127,7 +127,7 @@ struct vbuffer_data_ctl_select *vbuffer_data_ctl_select() { struct vbuffer_data_ctl_select *buf = malloc(sizeof(struct vbuffer_data_ctl_select)); if (!buf) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -148,7 +148,7 @@ struct vbuffer_data_ctl_push *vbuffer_data_ctl_push(struct vbuffer_stream *strea { struct vbuffer_data_ctl_push *buf = malloc(sizeof(struct vbuffer_data_ctl_push)); if (!buf) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -170,7 +170,7 @@ struct vbuffer_data_ctl_mark *vbuffer_data_ctl_mark(bool readonly) { struct vbuffer_data_ctl_mark *buf = malloc(sizeof(struct vbuffer_data_ctl_mark)); if (!buf) { - error(L"memory error"); + error("memory error"); return NULL; } diff --git a/lib/haka/vbuffer_stream.c b/lib/haka/vbuffer_stream.c index 002ab990..bf53cb45 100644 --- a/lib/haka/vbuffer_stream.c +++ b/lib/haka/vbuffer_stream.c @@ -95,13 +95,13 @@ bool vbuffer_stream_push(struct vbuffer_stream *stream, struct vbuffer *buffer, struct vbuffer_stream_chunk *chunk; if (stream->data.chunks->flags.eof) { - error(L"stream marked as finished"); + error("stream marked as finished"); return false; } chunk = malloc(sizeof(struct vbuffer_stream_chunk)); if (!chunk) { - error(L"memory error"); + error("memory error"); return false; } diff --git a/lib/haka/vbuffer_sub_stream.c b/lib/haka/vbuffer_sub_stream.c index b386978d..795c6a0c 100644 --- a/lib/haka/vbuffer_sub_stream.c +++ b/lib/haka/vbuffer_sub_stream.c @@ 
-42,7 +42,7 @@ bool vbuffer_sub_stream_push(struct vbuffer_sub_stream *stream, struct vbuffer_s chunk = malloc(sizeof(struct vbuffer_sub_stream_chunk)); if (!chunk) { - error(L"memory error"); + error("memory error"); return false; } diff --git a/modules/CMakeLists.txt b/modules/CMakeLists.txt index 916e8dc0..c649d50a 100644 --- a/modules/CMakeLists.txt +++ b/modules/CMakeLists.txt @@ -16,6 +16,7 @@ set(CMAKE_SHARED_LIBRARY_SUFFIX "${HAKA_MODULE_SUFFIX}") set(MODULE_INSTALL_PATH share/haka/modules) set(MODULE_INSTALL_CPATH lib/haka/modules) +set(MODULE_INSTALL_LIB lib/haka) macro(INCLUDE_MODULE name type) include_directories(${MODULE_SOURCE_DIR}/${type}/${name}) @@ -24,14 +25,15 @@ endmacro(INCLUDE_MODULE) macro(DEPENDS_MODULE target name type) INCLUDE_MODULE(${name} ${type}) target_link_libraries(${target} ${name}) - set_target_properties(${target} PROPERTIES INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib/haka/modules/${type}") + set_property(TARGET ${target} APPEND PROPERTY INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${MODULE_INSTALL_CPATH}/${type}") endmacro(DEPENDS_MODULE) macro(INSTALL_MODULE target type) set_property(GLOBAL APPEND PROPERTY module-${type} ${target}) include_directories(.) target_link_libraries(${target} libhaka) - install(TARGETS ${target} LIBRARY DESTINATION ${MODULE_INSTALL_CPATH}/${type}) + install(TARGETS ${target} LIBRARY DESTINATION ${MODULE_INSTALL_CPATH}/${type} ${ARGN}) + set_property(TARGET ${target} APPEND PROPERTY INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${MODULE_INSTALL_LIB}") endmacro(INSTALL_MODULE) # Find all modules. We need a separated loop in case one of the sub make adds diff --git a/modules/alert/elasticsearch/CMakeLists.txt b/modules/alert/elasticsearch/CMakeLists.txt new file mode 100644 index 00000000..8474be41 --- /dev/null +++ b/modules/alert/elasticsearch/CMakeLists.txt @@ -0,0 +1,15 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. 
If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +if(TARGET libelasticsearch) + add_library(alert-elasticsearch MODULE main.c) + set_target_properties(alert-elasticsearch PROPERTIES OUTPUT_NAME elasticsearch) + + INCLUDE_MODULE(elasticsearch misc) + target_link_libraries(alert-elasticsearch libelasticsearch) + + DEPENDS_MODULE(alert-elasticsearch geoip misc) + DEPENDS_MODULE(alert-elasticsearch ipv4 protocol) + INSTALL_MODULE(alert-elasticsearch alert) +endif() diff --git a/modules/alert/elasticsearch/doc/ips_dahsboard.json b/modules/alert/elasticsearch/doc/ips_dahsboard.json new file mode 100644 index 00000000..00563be6 --- /dev/null +++ b/modules/alert/elasticsearch/doc/ips_dahsboard.json @@ -0,0 +1,507 @@ +{ + "title": "Haka IPS", + "services": { + "query": { + "list": { + "0": { + "query": "*", + "alias": "all", + "color": "#7EB26D", + "id": 0, + "pin": false, + "type": "lucene", + "enable": true + }, + "1": { + "id": 1, + "color": "#E24D42", + "alias": "high", + "pin": false, + "type": "lucene", + "enable": true, + "query": "severity:high" + }, + "2": { + "id": 2, + "color": "#EAB839", + "alias": "successful", + "pin": false, + "type": "lucene", + "enable": true, + "query": "completion:successful" + } + }, + "ids": [ + 0, + 1, + 2 + ] + }, + "filter": { + "list": { + "0": { + "type": "time", + "field": "time", + "from": "now-1h", + "to": "now", + "mandate": "must", + "active": true, + "alias": "", + "id": 0 + } + }, + "ids": [ + 0 + ] + } + }, + "rows": [ + { + "title": "Alerts", + "height": "200px", + "editable": true, + "collapse": false, + "collapsable": true, + "panels": [ + { + "span": 5, + "editable": true, + "type": "histogram", + "loadingEditor": false, + "mode": "count", + "time_field": "time", + "value_field": "", + "x-axis": true, + "y-axis": true, + "scale": 1, + "y_format": "none", + "grid": { + "max": null, + "min": 0 + }, + "queries": { + "mode": "selected", + "ids": [ + 0, + 1, + 2 
+ ] + }, + "annotate": { + "enable": true, + "query": "method.ref:cwe\\:801", + "size": 20, + "field": "sources.address", + "sort": [ + "_score", + "desc" + ] + }, + "auto_int": true, + "resolution": 100, + "interval": "30s", + "intervals": [ + "auto", + "1s", + "1m", + "5m", + "10m", + "30m", + "1h", + "3h", + "12h", + "1d", + "1w", + "1y" + ], + "lines": true, + "fill": 4, + "linewidth": 2, + "points": false, + "pointradius": 5, + "bars": false, + "stack": false, + "spyable": true, + "zoomlinks": true, + "options": true, + "legend": true, + "show_query": false, + "interactive": true, + "legend_counts": false, + "timezone": "browser", + "percentage": false, + "zerofill": true, + "derivative": false, + "tooltip": { + "value_type": "cumulative", + "query_as_alias": true + }, + "title": "Total alerts", + "scaleSeconds": false + }, + { + "error": false, + "span": 3, + "editable": true, + "type": "terms", + "loadingEditor": false, + "field": "severity", + "exclude": [], + "missing": false, + "other": false, + "size": 10, + "order": "count", + "style": { + "font-size": "10pt" + }, + "donut": true, + "tilt": false, + "labels": true, + "arrangement": "horizontal", + "chart": "pie", + "counter_pos": "above", + "spyable": true, + "queries": { + "mode": "all", + "ids": [ + 0, + 1, + 2 + ] + }, + "tmode": "terms", + "tstat": "total", + "valuefield": "", + "title": "Severity" + }, + { + "error": false, + "span": 2, + "editable": true, + "type": "terms", + "loadingEditor": false, + "field": "confidence", + "exclude": [], + "missing": false, + "other": false, + "size": 10, + "order": "count", + "style": { + "font-size": "10pt" + }, + "donut": true, + "tilt": false, + "labels": true, + "arrangement": "horizontal", + "chart": "pie", + "counter_pos": "above", + "spyable": true, + "queries": { + "mode": "all", + "ids": [ + 0, + 1, + 2 + ] + }, + "tmode": "terms", + "tstat": "total", + "valuefield": "", + "title": "Confidence" + }, + { + "error": false, + "span": 2, + "editable": 
true, + "type": "terms", + "loadingEditor": false, + "field": "completion", + "exclude": [], + "missing": false, + "other": false, + "size": 10, + "order": "count", + "style": { + "font-size": "10pt" + }, + "donut": false, + "tilt": false, + "labels": true, + "arrangement": "horizontal", + "chart": "pie", + "counter_pos": "above", + "spyable": true, + "queries": { + "mode": "all", + "ids": [ + 0, + 1, + 2 + ] + }, + "tmode": "terms", + "tstat": "total", + "valuefield": "", + "title": "Completion" + } + ], + "notice": false + }, + { + "title": "Alerts 2", + "height": "200px", + "editable": true, + "collapse": false, + "collapsable": true, + "panels": [ + { + "error": false, + "span": 5, + "editable": true, + "type": "map", + "loadingEditor": false, + "map": "world", + "colors": [ + "#A0E2E2", + "#265656" + ], + "size": 150, + "exclude": [], + "spyable": true, + "queries": { + "mode": "all", + "ids": [ + 0, + 1, + 2 + ] + }, + "title": "Source", + "field": "sources.geo" + }, + { + "error": false, + "span": 3, + "editable": true, + "type": "terms", + "loadingEditor": false, + "field": "method.ref", + "exclude": [], + "missing": false, + "other": false, + "size": 4, + "order": "count", + "style": { + "font-size": "10pt" + }, + "donut": false, + "tilt": false, + "labels": true, + "arrangement": "horizontal", + "chart": "pie", + "counter_pos": "above", + "spyable": true, + "queries": { + "mode": "all", + "ids": [ + 0, + 1, + 2 + ] + }, + "tmode": "terms", + "tstat": "total", + "valuefield": "", + "title": "References" + }, + { + "error": false, + "span": 4, + "editable": true, + "type": "terms", + "loadingEditor": false, + "field": "method.description", + "exclude": [], + "missing": false, + "other": false, + "size": 5, + "order": "count", + "style": { + "font-size": "10pt" + }, + "donut": false, + "tilt": false, + "labels": true, + "arrangement": "horizontal", + "chart": "bar", + "counter_pos": "above", + "spyable": true, + "queries": { + "mode": "all", + "ids": [ + 0, 
+ 1, + 2 + ] + }, + "tmode": "terms", + "tstat": "total", + "valuefield": "", + "title": "Exploitation Method" + } + ], + "notice": false + }, + { + "title": "Summary", + "height": "150px", + "editable": true, + "collapse": false, + "collapsable": true, + "panels": [ + { + "error": false, + "span": 12, + "editable": true, + "type": "table", + "loadingEditor": false, + "size": 10, + "pages": 5, + "offset": 0, + "sort": [ + "targets.address", + "desc" + ], + "overflow": "min-height", + "fields": [ + "time", + "description", + "severity", + "confidence", + "completion", + "method.ref", + "sources.address", + "sources.service", + "targets.address", + "targets.service" + ], + "highlight": [ + "severity" + ], + "sortable": true, + "header": true, + "paging": true, + "field_list": false, + "all_fields": false, + "trimFactor": 300, + "localTime": false, + "timeField": "@timestamp", + "spyable": true, + "queries": { + "mode": "all", + "ids": [ + 0, + 1, + 2 + ] + }, + "style": { + "font-size": "9pt" + }, + "normTimes": true, + "title": "Summary" + } + ], + "notice": false + } + ], + "editable": true, + "failover": false, + "index": { + "interval": "none", + "pattern": "[logstash-]YYYY.MM.DD", + "default": "ips", + "warm_fields": false + }, + "style": "dark", + "panel_hints": true, + "pulldowns": [ + { + "type": "query", + "collapse": true, + "notice": false, + "enable": true, + "query": "*", + "pinned": true, + "history": [ + "completion:successful", + "severity:high", + "*", + "_type:alert", + "severity:medium", + "severity:*", + "severity:low", + "", + "confidence:high", + "*Alerts" + ], + "remember": 10 + }, + { + "type": "filtering", + "collapse": true, + "notice": true, + "enable": true + } + ], + "nav": [ + { + "type": "timepicker", + "collapse": false, + "notice": false, + "enable": true, + "status": "Stable", + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ], + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", 
+ "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "timefield": "time", + "now": true, + "filter_id": 0 + } + ], + "loader": { + "save_gist": false, + "save_elasticsearch": true, + "save_local": true, + "save_default": true, + "save_temp": true, + "save_temp_ttl_enable": true, + "save_temp_ttl": "30d", + "load_gist": true, + "load_elasticsearch": true, + "load_elasticsearch_size": 20, + "load_local": true, + "hide": false + }, + "refresh": "5s" +} \ No newline at end of file diff --git a/modules/alert/elasticsearch/doc/module.rst b/modules/alert/elasticsearch/doc/module.rst new file mode 100644 index 00000000..731279c2 --- /dev/null +++ b/modules/alert/elasticsearch/doc/module.rst @@ -0,0 +1,74 @@ +.. This Source Code Form is subject to the terms of the Mozilla Public +.. License, v. 2.0. If a copy of the MPL was not distributed with this +.. file, You can obtain one at http://mozilla.org/MPL/2.0/. + +Elasticsearch alert `alert/elasticsearch` +========================================= + +Description +^^^^^^^^^^^ + +This module will exports all alerts to an elasticsearch server. It also adds also +some extra information such as geoip data. + +Parameters +^^^^^^^^^^ + +.. describe:: elasticsearch_server + + Elasticsearch server address. + + .. warning:: Be careful not to create security rules that block elasticsearch traffic. + + +.. describe:: elasticsearch_index + + Elasticsearch index. + + .. note:: If this field is missing, Haka will use ``ips`` as default kibana index. + +.. describe:: geoip_database + + Absolute file path to geoip data file. Optional field that provides + geolocalization support. + +Example : + +.. 
code-block:: ini + + [alert] + # Select the alert module + module = "alert/elasticsearch" + + # alert/elasticsearch module option + elasticsearch_server = "http://127.0.0.1:9200" + #elasticsearch_index = "ips" + geoip_database = "/usr/share/GeoIP/GeoIP.dat" + +Kibana and Elasticsearch setup +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* Install and start Elasticsearch server + +.. code-block:: console + + sudo dpkg -i elasticsearch-.deb + sudo service elasticsearch start + +* Install and setup Kibana + +.. code-block:: console + + tar -zxvf kibana-.tar.gz \ + --strip-components=1 \ + -C /kibana + +.. note:: you may need to edit 'config.js' file and set the elasticsearch + address (e.g. elasticsearch = http://127.0.0.1:9200) + +Kibana dashboard +^^^^^^^^^^^^^^^^ + +The dashboard :download:`ips_dahsboard.json` is an example of a Kibana dashboard that shows some info about haka alerts. + +.. note:: Set the elasticsearch index to ``elasticsearch_index`` value in the main kibana dashboard setting. diff --git a/modules/alert/elasticsearch/main.c b/modules/alert/elasticsearch/main.c new file mode 100644 index 00000000..663b89a2 --- /dev/null +++ b/modules/alert/elasticsearch/main.c @@ -0,0 +1,481 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +/* Limit the length of alert id suffixes */ +#define ALERT_ID_LENGTH 16 + +#define MODULE "elasticsearch-alert" + + +const char ELASTICSEARCH_INDEX[] = "ips"; + +static bool json_insert_string(json_t *obj, const char *key, const char *string) +{ + json_t *json_str = json_string(string); + if (!json_str) { + error("json string creation error"); + return false; + } + if (json_object_set_new(obj, key, json_str)) { + error("json object insertion error"); + return false; + } + return true; +} + +static bool json_insert_double(json_t *obj, const char *key, double val) +{ + json_t *json_val = json_real(val); + if (!json_val) { + error("json real creation error"); + return false; + } + if (json_object_set_new(obj, key, json_val)) { + error("json object insertion error"); + return false; + } + return true; +} + +static bool alert_add_geolocalization(json_t *list, char *address, struct geoip_handle *geoip_handler) +{ + if (geoip_handler) { + char country_code[3]; + ipv4addr addr = ipv4_addr_from_string(address); + if (addr && geoip_lookup_country(geoip_handler, addr, country_code)) { + json_t *json_country = json_string(country_code); + if (!json_country) { + error("json string creation error"); + return false; + } + if (json_array_append_new(list, json_country)) { + error("json array insertion error"); + return false; + } + } + } + return true; +} + +json_t *json_get_or_create_list(json_t *obj, const char *key) +{ + json_t *array = json_object_get(obj, key); + if (!array) { + array = json_array(); + if (!array) { + error("json array creation error"); + return NULL; + } + if (json_object_set(obj, key, array) < 0) { + error("json object insertion error"); + json_decref(array); + return NULL; + } + } + else { + json_incref(array); + } + return array; +} + +static bool json_insert_list(json_t *obj, const char *key, char **array) +{ + char **iter; + json_t *json_str; + + json_t 
*nodes = json_array(); + if (!nodes) { + error("json array creation error"); + return false; + } + + for (iter = array; *iter; ++iter) { + json_str = json_string(*iter); + if (!json_str) { + error("json string creation error"); + json_decref(nodes); + return false; + } + if (json_array_append_new(nodes, json_str)) { + error("json array insertion error"); + return false; + } + } + + if (json_object_set_new(obj, key, nodes)) { + error("json object insertion error"); + return false; + } + + return true; +} + +static bool json_insert_address(json_t *obj, char **list, struct geoip_handle *geoip_handler) +{ + json_t *json_str; + char **iter; + json_t *address, *geo; + + address = json_get_or_create_list(obj, "address"); + if (!address) { + return false; + } + + geo = json_get_or_create_list(obj, "geo"); + if (!geo) { + json_decref(address); + return false; + } + + for (iter = list; *iter; ++iter) { + alert_add_geolocalization(geo, *iter, geoip_handler); + json_str = json_string(*iter); + if (!json_str) { + error("json string creation error"); + json_decref(address); + json_decref(geo); + return false; + } + if (json_array_append_new(address, json_str)) { + error("json array insertion error"); + json_decref(address); + json_decref(geo); + return false; + } + } + return true; +} + +static void json_create_mapping(struct elasticsearch_connector *connector, const char *index) +{ + json_t *mapping = json_pack("{s{s{s{s{s{s{ssss}s{ssss}}}}}}}", + "mappings", "alert", "properties", "method", "properties", + "ref", "type", "string", "index", "not_analyzed", "description", + "type", "string", "index", "not_analyzed" + ); + if (!mapping) { + error("json mapping creation error"); + return; + } + if (!elasticsearch_newindex(connector, index, mapping)) { + error("elasticsearch index creation error"); + json_decref(mapping); + return; + } + json_decref(mapping); +} + +json_t *alert_tojson(const struct time *time, const struct alert *alert, struct geoip_handle *geoip_handler) +{ + 
struct alert_node **iter; + json_t *ret = json_object(); + if (!ret) { + error("json object creation error"); + return NULL; + } + + { + char timestr[TIME_BUFSIZE]; + elasticsearch_formattimestamp(time, timestr, TIME_BUFSIZE); + if (!json_insert_string(ret, "time", timestr)) { + json_decref(ret); + return NULL; + } + } + + if (time_isvalid(&alert->start_time)) { + char timestr[TIME_BUFSIZE]; + time_tostring(&alert->start_time, timestr, TIME_BUFSIZE); + if (!json_insert_string(ret, "start time", timestr)) { + json_decref(ret); + return NULL; + } + } + + if (time_isvalid(&alert->end_time)) { + char timestr[TIME_BUFSIZE]; + time_tostring(&alert->end_time, timestr, TIME_BUFSIZE); + if (!json_insert_string(ret, "end time", timestr)) { + json_decref(ret); + return NULL; + } + } + + if (alert->severity > HAKA_ALERT_LEVEL_NONE && alert->severity < HAKA_ALERT_NUMERIC) { + if (!json_insert_string(ret, "severity", alert_level_to_str(alert->severity))) { + json_decref(ret); + return NULL; + } + } + + if (alert->confidence > HAKA_ALERT_LEVEL_NONE) { + if (alert->confidence == HAKA_ALERT_NUMERIC) { + if (!json_insert_double(ret, "confidence", alert->confidence_num)) { + json_decref(ret); + return NULL; + } + } + else { + if (!json_insert_string(ret, "confidence", alert_level_to_str(alert->confidence))) { + json_decref(ret); + return NULL; + } + } + } + + if (alert->completion > HAKA_ALERT_COMPLETION_NONE) { + if (!json_insert_string(ret, "completion", alert_completion_to_str(alert->completion))) { + json_decref(ret); + return NULL; + } + } + + if (alert->description) { + if (!json_insert_string(ret, "description", alert->description)) { + json_decref(ret); + return NULL; + } + } + + if (alert->method_description || alert->method_ref) { + json_t *desc = json_object(); + if (!desc) { + error("json object creation error"); + json_decref(ret); + return NULL; + } + + if (alert->method_description) { + if (!json_insert_string(desc, "description", alert->method_description)) { + 
json_decref(desc); + json_decref(ret); + return NULL; + } + } + + if (alert->method_ref) { + if (!json_insert_list(desc, "ref", alert->method_ref)) { + json_decref(desc); + json_decref(ret); + return NULL; + } + } + + if (json_object_set_new(ret, "method", desc) < 0) { + error("json object insertion error"); + json_decref(ret); + return NULL; + } + } + + if (alert->sources) { + json_t *sources = json_object(); + if (!sources) { + error("json object creation error"); + json_decref(ret); + return NULL; + } + for (iter = alert->sources; *iter; ++iter) { + if (strcmp(alert_node_to_str((*iter)->type), "address") == 0) { + if (!json_insert_address(sources, (*iter)->list, geoip_handler)) { + json_decref(sources); + json_decref(ret); + return NULL; + } + } + else { + if (!json_insert_list(sources, "services", (*iter)->list)) { + json_decref(sources); + json_decref(ret); + return NULL; + } + } + } + + if (!json_object_set_new(ret, "sources", sources) < 0) { + error("json object insertion error"); + json_decref(ret); + return NULL; + } + } + + if (alert->targets) { + json_t *targets = json_object(); + if (!targets) { + error("json object creation error"); + json_decref(ret); + return NULL; + } + for (iter = alert->targets; *iter; ++iter) { + if (strcmp(alert_node_to_str((*iter)->type), "address") == 0) { + if (!json_insert_address(targets, (*iter)->list, geoip_handler)) { + json_decref(targets); + json_decref(ret); + return NULL; + } + } + else { + if (!json_insert_list(targets, "services", (*iter)->list)) { + json_decref(targets); + json_decref(ret); + } + } + } + + if (!json_object_set_new(ret, "targets", targets) < 0) { + error("json object insertion error"); + json_decref(ret); + return NULL; + } + } + + return ret; +} + +struct elasticsearch_alerter { + struct alerter_module module; + struct elasticsearch_connector *connector; + char *server; + char *index; + struct geoip_handle *geoip_handler; + char alert_id_prefix[ELASTICSEARCH_ID_LENGTH + 1]; +}; + +static int 
init(struct parameters *args) +{ + return 0; +} + +static void cleanup() +{ +} + +static bool do_alert(struct alerter *state, uint64 id, const struct time *time, const struct alert *alert) +{ + struct elasticsearch_alerter *alerter = (struct elasticsearch_alerter *)state; + + json_t *ret; + char elasticsearch_id[ELASTICSEARCH_ID_LENGTH + ALERT_ID_LENGTH + 1]; + snprintf(elasticsearch_id, ELASTICSEARCH_ID_LENGTH + ALERT_ID_LENGTH + 1, + "%s%llx", alerter->alert_id_prefix, id); + + ret = alert_tojson(time, alert, alerter->geoip_handler); + return ret && elasticsearch_insert(alerter->connector, alerter->index, "alert", elasticsearch_id, ret); +} + +static bool do_alert_update(struct alerter *state, uint64 id, const struct time *time, const struct alert *alert) +{ + struct elasticsearch_alerter *alerter = (struct elasticsearch_alerter *)state; + + json_t *ret; + char elasticsearch_id[ELASTICSEARCH_ID_LENGTH + ALERT_ID_LENGTH + 1]; + snprintf(elasticsearch_id, ELASTICSEARCH_ID_LENGTH + ALERT_ID_LENGTH + 1, + "%s%llx", alerter->alert_id_prefix, id); + + ret = alert_tojson(time, alert, alerter->geoip_handler); + return ret && elasticsearch_update(alerter->connector, alerter->index, "alert", elasticsearch_id, ret); +} + +void cleanup_alerter(struct alerter_module *module) +{ + struct elasticsearch_alerter *alerter = (struct elasticsearch_alerter *)module; + if (alerter->connector) { + elasticsearch_connector_close(alerter->connector); + free(alerter->server); + free(alerter->index); + } + if (alerter->geoip_handler) { + geoip_destroy(alerter->geoip_handler); + } + free(alerter); +} + +struct alerter_module *init_alerter(struct parameters *args) +{ + struct elasticsearch_alerter *elasticsearch_alerter = malloc(sizeof(struct elasticsearch_alerter)); + if (!elasticsearch_alerter) { + error("memory error"); + return NULL; + } + + elasticsearch_alerter->module.alerter.alert = do_alert; + elasticsearch_alerter->module.alerter.update = do_alert_update; + + const char *server = 
parameters_get_string(args, "elasticsearch_server", NULL); + if (!server) { + error("missing elasticsearch address server"); + free(elasticsearch_alerter); + return NULL; + } + + elasticsearch_alerter->server = strdup(server); + if (!elasticsearch_alerter->server) { + error("memory error"); + free(elasticsearch_alerter); + return NULL; + } + + elasticsearch_alerter->connector = elasticsearch_connector_new(elasticsearch_alerter->server); + if (!elasticsearch_alerter->connector) { + error("enable to connect to elasticsearch server %s", elasticsearch_alerter->server); + cleanup_alerter(&elasticsearch_alerter->module); + return NULL; + } + + const char *index = parameters_get_string(args, "elasticsearch_index", NULL); + if (!index) { + elasticsearch_alerter->index = strdup(ELASTICSEARCH_INDEX); + } + else { + elasticsearch_alerter->index = strdup(index); + } + if (!elasticsearch_alerter->index) { + error("memory error"); + return NULL; + } + messagef(HAKA_LOG_DEBUG, MODULE, "using elasticsearch index %s", + elasticsearch_alerter->index); + + elasticsearch_genid(elasticsearch_alerter->alert_id_prefix, ELASTICSEARCH_ID_LENGTH); + messagef(HAKA_LOG_DEBUG, MODULE, "generating global id prefix %s", + elasticsearch_alerter->alert_id_prefix); + + json_create_mapping(elasticsearch_alerter->connector, elasticsearch_alerter->index); + + const char *database = parameters_get_string(args, "geoip_database", NULL); + if (database) { + elasticsearch_alerter->geoip_handler = geoip_initialize(database); + } + else { + elasticsearch_alerter->geoip_handler = NULL; + messagef(HAKA_LOG_WARNING, "geoip", "missing geoip database, the ip geographic data will not be collected"); + } + + return &elasticsearch_alerter->module; +} + +struct alert_module HAKA_MODULE = { + module: { + type: MODULE_ALERT, + name: "Elasticsearch alert", + description: "Alert output to elasticsearch server", + api_version: HAKA_API_VERSION, + init: init, + cleanup: cleanup + }, + init_alerter: init_alerter, + 
cleanup_alerter: cleanup_alerter +}; diff --git a/modules/alert/file/main.c b/modules/alert/file/main.c index 08614112..c1b36fcc 100644 --- a/modules/alert/file/main.c +++ b/modules/alert/file/main.c @@ -77,7 +77,7 @@ static bool write_to_file(struct file_alerter *alerter, uint64 id, const struct } flockfile(alerter->output); - fprintf(alerter->output, "%salert%s: %s%ls\n", color, clear, + fprintf(alerter->output, "%salert%s: %s%s\n", color, clear, update ? "update " : "", alert_tostring(id, time, alert, "", indent, alerter->color)); funlockfile(alerter->output); @@ -98,7 +98,7 @@ struct alerter_module *init_alerter(struct parameters *args) { struct file_alerter *file_alerter = malloc(sizeof(struct file_alerter)); if (!file_alerter) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -115,7 +115,7 @@ struct alerter_module *init_alerter(struct parameters *args) } if (file_alerter->format == FORMAT_LAST) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -123,7 +123,7 @@ struct alerter_module *init_alerter(struct parameters *args) if (filename && strcmp(filename, "-") != 0) { file_alerter->output = fopen(filename, "a"); if (!file_alerter->output) { - error(L"cannot open file '%s' for alert", filename); + error("cannot open file '%s' for alert", filename); return NULL; } } else { @@ -150,8 +150,8 @@ void cleanup_alerter(struct alerter_module *module) struct alert_module HAKA_MODULE = { module: { type: MODULE_ALERT, - name: L"File alert", - description: L"Alert output to file", + name: "File alert", + description: "Alert output to file", api_version: HAKA_API_VERSION, init: init, cleanup: cleanup diff --git a/modules/alert/syslog/main.c b/modules/alert/syslog/main.c index c7ea7ca1..616523de 100644 --- a/modules/alert/syslog/main.c +++ b/modules/alert/syslog/main.c @@ -30,7 +30,7 @@ static void cleanup() static bool post(uint64 id, const struct time *time, const struct alert *alert, bool update) { - syslog(LOG_NOTICE, "alert: 
%s%ls", update ? "update " : "", alert_tostring(id, time, alert, "", " ", false)); + syslog(LOG_NOTICE, "alert: %s%s", update ? "update " : "", alert_tostring(id, time, alert, "", " ", false)); return true; } @@ -63,8 +63,8 @@ void cleanup_alerter(struct alerter_module *alerter) struct alert_module HAKA_MODULE = { module: { type: MODULE_ALERT, - name: L"Syslog alert", - description: L"Alert output to syslog", + name: "Syslog alert", + description: "Alert output to syslog", api_version: HAKA_API_VERSION, init: init, cleanup: cleanup diff --git a/modules/log/syslog/main.c b/modules/log/syslog/main.c index a1efd8b8..a9e567e8 100644 --- a/modules/log/syslog/main.c +++ b/modules/log/syslog/main.c @@ -34,11 +34,11 @@ static const int syslog_level[HAKA_LOG_LEVEL_LAST] = { LOG_DEBUG, }; -static int logger_message(struct logger *state, log_level lvl, const wchar_t *module, const wchar_t *message) +static int logger_message(struct logger *state, log_level lvl, const char *module, const char *message) { /* Send log to syslog */ assert(lvl >= 0 && lvl < HAKA_LOG_LEVEL_LAST); - syslog(syslog_level[lvl], "%ls: %ls", module, message); + syslog(syslog_level[lvl], "%s: %s", module, message); return 0; } @@ -60,8 +60,8 @@ void cleanup_logger(struct logger_module *logger) struct log_module HAKA_MODULE = { module: { type: MODULE_LOG, - name: L"Syslog logger", - description: L"Logger to syslog", + name: "Syslog logger", + description: "Logger to syslog", api_version: HAKA_API_VERSION, init: init, cleanup: cleanup diff --git a/modules/misc/elasticsearch/CMakeLists.txt b/modules/misc/elasticsearch/CMakeLists.txt new file mode 100644 index 00000000..7319b3e5 --- /dev/null +++ b/modules/misc/elasticsearch/CMakeLists.txt @@ -0,0 +1,37 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +find_package(Jansson) +find_package(LibCurl) +find_package(LibUuid) + +if(JANSSON_FOUND AND LIBCURL_FOUND AND LIBUUID_FOUND) + swig_process(elasticsearchswig lua elasticsearch.i) + + add_library(libelasticsearch SHARED + elasticsearch.c + json.c + ) + + include_directories(${JANSSON_INCLUDE_DIR} ${LIBCURL_INCLUDE_DIR} ${LIBUUID_INCLUDE_DIR}) + target_link_libraries(libelasticsearch LINK_PUBLIC ${JANSSON_LIBRARY}) + target_link_libraries(libelasticsearch LINK_PRIVATE ${LIBCURL_LIBRARY} ${LIBUUID_LIBRARY} libhaka) + set_target_properties(libelasticsearch PROPERTIES VERSION ${HAKA_VERSION_MAJOR}.${HAKA_VERSION_MINOR}.${HAKA_VERSION_PATCH} + SOVERSION ${HAKA_VERSION_MAJOR}) + + install(TARGETS libelasticsearch LIBRARY DESTINATION ${MODULE_INSTALL_LIB}) + + add_library(elasticsearch MODULE + main.c + ${SWIG_elasticsearchswig_FILES} + ) + + target_link_libraries(elasticsearch libelasticsearch) + + SWIG_FIX_ENTRYPOINT(elasticsearch misc) + + INSTALL_MODULE(elasticsearch misc) +else() + message(STATUS "Not building module elasticsearch (missing libraries)") +endif() diff --git a/modules/misc/elasticsearch/doc/elasticsearch.rst b/modules/misc/elasticsearch/doc/elasticsearch.rst new file mode 100644 index 00000000..d0b9183e --- /dev/null +++ b/modules/misc/elasticsearch/doc/elasticsearch.rst @@ -0,0 +1,96 @@ +.. This Source Code Form is subject to the terms of the Mozilla Public +.. License, v. 2.0. If a copy of the MPL was not distributed with this +.. file, You can obtain one at http://mozilla.org/MPL/2.0/. + +.. highlightlang:: lua + +ElasticSearch +============= + +.. haka:module:: elasticsearch + +ElasticSearch database connector for Haka. + +**Usage:** + +:: + + local elasticsearch = require('misc/elasticsearch') + +API +--- + +.. haka:function:: connector(host) -> connector + + :param host: ElasticSearch host name. + :ptype host: string + + Create a new ElasticSearch connector and connect to the given address. + +.. haka:class:: Connector + + .. 
haka:method:: Connector:newindex(index, data) + + :param index: ElasticSearch index name. + :ptype index: string + :param data: Data to pass to the ElasticSearch server (check the + ElasticSearch API for more detail about it). + :ptype data: table + + Create a new index in the ElasticSearch database. + + .. haka:method:: Connector:insert(index, type, id, data) + + :param index: ElasticSearch index name. + :ptype index: string + :param type: Object type. + :ptype type: string + :param id: Optional object id (can be ``nil``). + :ptype id: string + :param data: Object data. + :ptype data: table + + Insert a new object in the ElasticSearch database. + + .. haka:method:: Connector:update(index, type, id, data) + + :param index: ElasticSearch index name. + :ptype index: string + :param type: Object type. + :ptype type: string + :param id: Object id. + :ptype id: string + :param data: Object data to update. + :ptype data: table + + Update some data of an existing object in the ElasticSearch database. + + .. haka:method:: Connector:timestamp(time) -> formated + + :param time: Time. + :ptype time: :haka:class:`haka.time` + :return formated: Formated timestamp in ElasticSearch format. + :rtype formated: string + + Render a timestamp to the standard ElasticSearch format. + + .. haka:method:: Connector:genid() -> id + + :return id: Unique id. + :rtype id: string + + Generate a unique id which can be used as an object id. + + .. seealso:: :haka:func:`.insert`. 
+ +Example +------- + +:: + + local elasticsearch = require('misc/elasticsearch') + + local connector = elasticsearch.connector('http://127.0.0.1:9200') + + connector:insert("myindex", "mytype", nil, { name="object name" }) + diff --git a/modules/misc/elasticsearch/elasticsearch.c b/modules/misc/elasticsearch/elasticsearch.c new file mode 100644 index 00000000..b151e264 --- /dev/null +++ b/modules/misc/elasticsearch/elasticsearch.c @@ -0,0 +1,680 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include "haka/elasticsearch.h" + +#define MODULE "elasticsearch" + + +static bool initialized = false; + +static bool init() +{ + if (!initialized) { + if (curl_global_init(CURL_GLOBAL_ALL) != CURLE_OK) { + error("unable to initialize curl library"); + return false; + } + + initialized = true; + } + + return true; +} + +FINI static void cleanup() +{ + if (initialized) { + curl_global_cleanup(); + } +} + + +static char base64_encoding_table[] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', + 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', + 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', + 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', + 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', + 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', + 'w', 'x', 'y', 'z', '0', '1', '2', '3', + '4', '5', '6', '7', '8', '9', '+', '='}; + +static void base64_encode(const unsigned char *data, + size_t input_length, char *output) +{ + int j = 0; + + for (; input_length >= 3; data+=3, input_length-=3) { + const uint32 triple = (data[2] << 0x10) + (data[1] << 0x08) + data[0]; + + output[j++] = base64_encoding_table[(triple >> 0 * 6) & 0x3F]; + output[j++] = base64_encoding_table[(triple >> 1 * 6) & 0x3F]; + output[j++] = base64_encoding_table[(triple >> 2 * 6) & 0x3F]; + 
output[j++] = base64_encoding_table[(triple >> 3 * 6) & 0x3F]; + } + + /* Leftover */ + if (input_length > 0) { + const uint32 b0 = data[0]; + const uint32 b1 = input_length > 1 ? data[1] : 0; + const uint32 b2 = input_length > 2 ? data[2] : 0; + + const uint32 triple = (b2 << 0x10) + (b1 << 0x08) + b0; + + output[j++] = base64_encoding_table[(triple >> 0 * 6) & 0x3F]; + output[j++] = base64_encoding_table[(triple >> 1 * 6) & 0x3F]; + if (input_length > 1) { + output[j++] = base64_encoding_table[(triple >> 2 * 6) & 0x3F]; + if (input_length > 2) { + output[j++] = base64_encoding_table[(triple >> 3 * 6) & 0x3F]; + } + } + } + + output[j] = '\0'; +} + + +struct elasticsearch_request { + struct list2_elem list; + enum { + INSERT, + UPDATE, + NEWINDEX, + } request_type; + char *index; + char *type; + char *id; + char *data; +}; + +struct elasticsearch_connector { + char *server_address; + CURL *curl; + mutex_t request_mutex; + semaphore_t request_wait; + struct list2 request; + struct vector request_content; + thread_t request_thread; + bool started:1; + bool exit:1; +}; + + +static void free_request(struct elasticsearch_request *req) +{ + free(req->index); + free(req->type); + free(req->id); + free(req->data); + free(req); +} + +static void *elasticsearch_request_thread(void *_connector); + + +static size_t write_callback_null(char *ptr, size_t size, size_t nmemb, void *userdata) +{ + return size*nmemb; +} + +struct callback_sting_data { + char *string; + size_t rem; +}; + +static size_t read_callback_string(char *buffer, size_t size, size_t nmemb, void *_data) +{ + size_t maxsize; + struct callback_sting_data *data = _data; + + maxsize = size*nmemb; + if (maxsize > data->rem) maxsize = data->rem; + + memcpy(buffer, data->string, maxsize); + data->string += maxsize; + data->rem -= maxsize; + + return maxsize; +} + +static size_t write_callback_string(char *ptr, size_t size, size_t nmemb, void *_data) +{ + struct callback_sting_data *data = _data; + + size *= nmemb; + 
+ data->string = realloc(data->string, data->rem + size + 1); + if (!data->string) { + error("memory error"); + return 0; + } + + memcpy(data->string+data->rem, ptr, size); + *(data->string+data->rem+size) = '\0'; + data->rem += size; + + return size; +} + +static bool start_request_thread(struct elasticsearch_connector *connector) +{ + if (!connector->started) { + if (!thread_create(&connector->request_thread, &elasticsearch_request_thread, connector)) { + return false; + } + } + return true; +} + +bool elasticsearch_formattimestamp(const struct time *time, char *timestr, size_t size) { + return time_format(time, "%Y/%m/%d %H:%M:%S", timestr, size); +} + +struct elasticsearch_connector *elasticsearch_connector_new(const char *server) +{ + struct elasticsearch_connector *ret; + + if (!init()) { + return NULL; + } + + ret = malloc(sizeof(struct elasticsearch_connector)); + if (!ret) { + error("memory error"); + return NULL; + } + + memset(ret, 0, sizeof(struct elasticsearch_connector)); + + if (!mutex_init(&ret->request_mutex, false)) { + free(ret); + return NULL; + } + + if (!semaphore_init(&ret->request_wait, 0)) { + free(ret); + return NULL; + } + + list2_init(&ret->request); + vector_create(&ret->request_content, char, NULL); + ret->exit = false; + + ret->server_address = strdup(server); + if (!ret->server_address) { + elasticsearch_connector_close(ret); + return NULL; + } + + ret->curl = curl_easy_init(); + if (!ret->curl) { + error("unable to initialize curl session"); + elasticsearch_connector_close(ret); + return NULL; + } + + /* Uses of signal is not possible here in multi-threaded environment */ + curl_easy_setopt(ret->curl, CURLOPT_NOSIGNAL, 1L); + + ret->started = false; + + return ret; +} + +bool elasticsearch_connector_close(struct elasticsearch_connector *connector) +{ + list2_iter iter, end; + + /* Stop request thread */ + connector->exit = true; + semaphore_post(&connector->request_wait); + thread_join(connector->request_thread, NULL); + + end = 
list2_end(&connector->request); + for (iter = list2_begin(&connector->request); iter != end; ) { + struct elasticsearch_request *req = list2_get(iter, struct elasticsearch_request, list); + iter = list2_erase(iter); + free_request(req); + } + + mutex_destroy(&connector->request_mutex); + semaphore_destroy(&connector->request_wait); + vector_destroy(&connector->request_content); + + if (connector->curl) curl_easy_cleanup(connector->curl); + free(connector->server_address); + free(connector); + return true; +} + +static int elasticsearch_post(struct elasticsearch_connector *connector, const char *url, + const char *data, json_t **json_res) +{ + CURLcode res; + struct callback_sting_data reqdata, resdata; + long ret_code; + + reqdata.string = (char *)data; + reqdata.rem = strlen(reqdata.string); + + resdata.string = NULL; + resdata.rem = 0; + + curl_easy_setopt(connector->curl, CURLOPT_POST, 1L); + + if (json_res) { + curl_easy_setopt(connector->curl, CURLOPT_WRITEFUNCTION, &write_callback_string); + curl_easy_setopt(connector->curl, CURLOPT_WRITEDATA, &resdata); + } + else { + curl_easy_setopt(connector->curl, CURLOPT_WRITEFUNCTION, &write_callback_null); + curl_easy_setopt(connector->curl, CURLOPT_WRITEDATA, NULL); + } + + curl_easy_setopt(connector->curl, CURLOPT_READFUNCTION, &read_callback_string); + curl_easy_setopt(connector->curl, CURLOPT_READDATA, &reqdata); + curl_easy_setopt(connector->curl, CURLOPT_POSTFIELDSIZE, reqdata.rem); + curl_easy_setopt(connector->curl, CURLOPT_POSTFIELDS, NULL); + curl_easy_setopt(connector->curl, CURLOPT_URL, url); + + curl_easy_setopt(connector->curl, CURLOPT_TIMEOUT, 5); + + res = curl_easy_perform(connector->curl); + + if (res != CURLE_OK) { + error("post error: %s", curl_easy_strerror(res)); + free(resdata.string); + return -res; + } + + res = curl_easy_getinfo(connector->curl, CURLINFO_RESPONSE_CODE, &ret_code); + if (res != CURLE_OK) { + error("post error: %s", curl_easy_strerror(res)); + free(resdata.string); + return 
-res; + } + + messagef(HAKA_LOG_DEBUG, MODULE, "post successful: %s return %lu", url, ret_code); + + /* Check for the rest API return code, treat non 2** has error. */ + if (ret_code < 200 || ret_code >= 300) { + free(resdata.string); + return ret_code; + } + + if (json_res) { + if (!resdata.string) { + error("post error: invalid json response"); + free(resdata.string); + return -CURL_LAST; + } + + *json_res = json_loads(resdata.string, 0, NULL); + free(resdata.string); + + if (!(*json_res)) { + error("post error: invalid json response"); + return -CURL_LAST; + } + } + + return 0; +} + +static void append(struct vector *string, const char *str) +{ + const size_t len = strlen(str); + const size_t index = vector_count(string); + + vector_resize(string, index+len); + memcpy(vector_get(string, char, index), str, len); +} + +static void push_request(struct elasticsearch_connector *connector, struct elasticsearch_request *req, + bool delayed) +{ + if (!delayed) { + start_request_thread(connector); + } + + list2_elem_init(&req->list); + + mutex_lock(&connector->request_mutex); + list2_insert(list2_end(&connector->request), &req->list); + semaphore_post(&connector->request_wait); + mutex_unlock(&connector->request_mutex); +} + +#define BUFFER_SIZE 1024 + +static int do_one_request(struct elasticsearch_connector *connector, const char *url, const char *data, + int *lasterror) +{ + const int code = elasticsearch_post(connector, url, data, NULL); + if (check_error()) { + assert(code < 0); + + if (!lasterror || code != *lasterror) { + messagef(HAKA_LOG_ERROR, MODULE, "request failed: %s", clear_error()); + if (lasterror) *lasterror = code; + } + + return -1; + } + return code; +} + +static void *elasticsearch_request_thread(void *_connector) +{ + struct elasticsearch_connector *connector = _connector; + char buffer[BUFFER_SIZE]; + char url[BUFFER_SIZE]; + int lasterror = 0; + + connector->started = true; + + while (!connector->exit) { + struct list2 copy; + list2_iter iter, 
end; + int code; + + list2_init(©); + + /* Wait for request */ + semaphore_wait(&connector->request_wait); + + /* Get the requests */ + mutex_lock(&connector->request_mutex); + if (list2_empty(&connector->request)) { + mutex_unlock(&connector->request_mutex); + continue; + } + + list2_swap(&connector->request, ©); + mutex_unlock(&connector->request_mutex); + + /* Build request data */ + vector_resize(&connector->request_content, 0); + + end = list2_end(©); + for (iter = list2_begin(©); iter != end; ) { + struct elasticsearch_request *req = list2_get(iter, struct elasticsearch_request, list); + + switch (req->request_type) { + case NEWINDEX: + { + snprintf(url, BUFFER_SIZE, "%s/%s", connector->server_address, req->index); + code = do_one_request(connector, url, req->data, &lasterror); + if (code > 0 && code != 400) { + messagef(HAKA_LOG_ERROR, MODULE, "request failed: %s return error %d", url, code); + } + } + break; + + case INSERT: + case UPDATE: + { + snprintf(buffer, BUFFER_SIZE, "{ \"%s\" : { \"_index\" : \"%s\", \"_type\" : \"%s\"", + (req->request_type == INSERT ? 
"index" : "update"), req->index, req->type); + append(&connector->request_content, buffer); + + if (req->id) { + snprintf(buffer, BUFFER_SIZE, ", \"_id\" : \"%s\"", req->id); + append(&connector->request_content, buffer); + } + + append(&connector->request_content, " } }\n"); + append(&connector->request_content, req->data); + append(&connector->request_content, "\n"); + } + break; + + default: + messagef(HAKA_LOG_ERROR, MODULE, "invalid request type: %d", req->request_type); + break; + } + + iter = list2_erase(iter); + free_request(req); + } + + /* Do bulk request if needed :*/ + if (vector_count(&connector->request_content) > 0) { + snprintf(url, BUFFER_SIZE, "%s/_bulk", connector->server_address); + *vector_push(&connector->request_content, char) = '\0'; + + code = do_one_request(connector, url, vector_first(&connector->request_content, char), &lasterror); + if (code) { + if (code != -1) { + messagef(HAKA_LOG_ERROR, MODULE, "request failed: %s return error %d", url, code); + } + } + } + } + + connector->started = false; + + return NULL; +} + +#define DUPSTR(name) \ + if (name) { \ + req->name = strdup(name); \ + if (!req->name) { \ + error("memory error"); \ + free_request(req); \ + return false; \ + } \ + } + +static bool elasticsearch_request(struct elasticsearch_connector *connector, bool delayed, + int reqtype, const char *index, const char *type, const char *id, json_t *data) +{ + struct elasticsearch_request *req; + + assert(connector); + + req = malloc(sizeof(struct elasticsearch_request)); + if (!req) { + error("memory error"); + free(data); + return false; + } + + memset(req, 0, sizeof(struct elasticsearch_request)); + + req->request_type = reqtype; + + DUPSTR(index); + DUPSTR(type); + DUPSTR(id); + + req->data = json_dumps(data, JSON_COMPACT); + json_decref(data); + if (!req->data) { + error("cannot dump json object"); + free_request(req); + return false; + } + + push_request(connector, req, delayed); + return true; + +} + +void 
elasticsearch_genid(char *id, size_t size) +{ + assert(size >= ELASTICSEARCH_ID_LENGTH); + uuid_t uuid; + uuid_generate(uuid); + base64_encode(uuid, 16, id); +} + +bool elasticsearch_newindex(struct elasticsearch_connector *connector, const char *index, json_t *data) +{ + assert(connector); + assert(index); + + /* This request is delayed, it will wait for the next request to start the processing thread + * if it is not already started. */ + return elasticsearch_request(connector, true, NEWINDEX, index, NULL, NULL, data); +} + +bool elasticsearch_insert(struct elasticsearch_connector *connector, const char *index, + const char *type, const char *id, json_t *data) +{ + + assert(connector); + assert(index); + assert(type); + + return elasticsearch_request(connector, false, INSERT, index, type, id, data); +} + +bool elasticsearch_update(struct elasticsearch_connector *connector, const char *index, const char *type, + const char *id, json_t *data) +{ + json_t *json_update; + + assert(connector); + assert(index); + assert(type); + assert(id); + + json_update = json_object(); + if (!json_update || json_object_set(json_update, "doc", data)) { + error("memory error"); + json_decref(data); + return false; + } + + json_decref(data); + + return elasticsearch_request(connector, false, UPDATE, index, type, id, json_update); +} + +#if 0 +/* Elasticsearch synchroneous API not used any more */ +bool elasticsearch_insert_sync(struct elasticsearch_connector *connector, const char *index, + const char *type, const char *id, json_t *doc) +{ + int res; + size_t len; + char *url, *json_dump; + + assert(connector); + assert(index); + assert(type); + + /* Format uri: ///id */ + len = strlen(connector->server_address) + strlen(index) + strlen(type) + + (id ? 
strlen(id) : 0) + 4; + url = malloc(len); + if (!url) { + error("memory error"); + return false; + } + + if (id) { + snprintf(url, len, "%s/%s/%s/%s", connector->server_address, + index, type, id); + } + else { + snprintf(url, len, "%s/%s/%s", connector->server_address, + index, type); + } + + json_dump = json_dumps(doc, JSON_COMPACT); + if (!json_dump) { + error("cannot dump json object"); + free(url); + return false; + } + + res = elasticsearch_post(connector, url, json_dump, NULL); + free(url); + free(json_dump); + json_decref(doc); + + if (check_error()) return false; + + if (res < 200 || res >= 300) { + error("post error: result %d %s", res, url); + return false; + } + + return true; +} + +bool elasticsearch_update_sync(struct elasticsearch_connector *connector, const char *index, const char *type, + const char *id, json_t *doc) +{ + int res; + json_t *json_update; + size_t len; + char *url, *json_dump; + + assert(connector); + assert(index); + assert(type); + assert(id); + + return false; + + /* Format uri: ///id/_update */ + len = strlen(connector->server_address) + strlen(index) + strlen(type) + strlen(id) + 12; + url = malloc(len); + if (!url) { + error("memory error"); + return false; + } + + snprintf(url, len, "%s/%s/%s/%s/_update", connector->server_address, + index, type, id); + + json_update = json_object(); + if (!json_update || json_object_set(json_update, "doc", doc)) { + error("memory error"); + free(url) + return false; + } + + json_decref(doc); + doc = NULL; + + json_dump = json_dumps(json_update, JSON_COMPACT); + if (!json_dump) { + error("cannot dump json object"); + free(url); + return false; + } + + res = elasticsearch_post(connector, url, json_dump, NULL); + free(url); + free(json_dump); + json_decref(doc); + + if (check_error()) return false; + + if (res < 200 || res >= 300) { + error("post error: result %d %s", res); + return false; + } + + return true; +} +#endif diff --git a/modules/misc/elasticsearch/elasticsearch.i 
b/modules/misc/elasticsearch/elasticsearch.i new file mode 100644 index 00000000..aea1d159 --- /dev/null +++ b/modules/misc/elasticsearch/elasticsearch.i @@ -0,0 +1,78 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +%module elasticsearch + +%{ +#include "haka/elasticsearch.h" + +#include + +#include +%} + +%include "haka/lua/swig.si" +%include "haka/lua/object.si" +%include "json.si" + +%nodefaultctor; +%nodefaultdtor; + +%rename(connector) elasticsearch_connector; + +struct elasticsearch_connector { + %extend { + elasticsearch_connector(const char *address) { + return elasticsearch_connector_new(address); + } + + ~elasticsearch_connector() { + elasticsearch_connector_close($self); + } + + void newindex(const char *index, json_t *data) { + if (!index || !data) { error("invalid parameter"); return; } + + elasticsearch_newindex($self, index, data); + } + + void insert(const char *index, const char *type, const char *id, json_t *data) { + if (!index || !type || !data) { error("invalid parameter"); return; } + + elasticsearch_insert($self, index, type, id, data); + } + + void update(const char *index, const char *type, const char *id, json_t *data) { + if (!index || !type || !id || !data) { error("invalid parameter"); return; } + + elasticsearch_update($self, index, type, id, data); + } + + void timestamp(struct time *time, char **TEMP_OUTPUT) + { + *TEMP_OUTPUT = malloc(TIME_BUFSIZE); + if (!*TEMP_OUTPUT) { + error("memory error"); + return; + } + + if (!elasticsearch_formattimestamp(time, *TEMP_OUTPUT, TIME_BUFSIZE)) { + assert(check_error()); + free(*TEMP_OUTPUT); + *TEMP_OUTPUT = NULL; + } + } + + void genid(char **TEMP_OUTPUT) + { + *TEMP_OUTPUT = malloc(ELASTICSEARCH_ID_LENGTH + 1); + if (!*TEMP_OUTPUT) { + error("memory error"); + return; + } + + elasticsearch_genid(*TEMP_OUTPUT, 
ELASTICSEARCH_ID_LENGTH); + } + } +}; diff --git a/modules/misc/elasticsearch/haka/elasticsearch.h b/modules/misc/elasticsearch/haka/elasticsearch.h new file mode 100644 index 00000000..e359b70b --- /dev/null +++ b/modules/misc/elasticsearch/haka/elasticsearch.h @@ -0,0 +1,30 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef _ELASTICSEARCH_H_ +#define _ELASTICSEARCH_H_ + +#define ELASTICSEARCH_ID_LENGTH 24 + +#include +#include + +#include "json.h" + + +struct elasticsearch_connector; + +struct elasticsearch_connector *elasticsearch_connector_new(const char *server); +bool elasticsearch_connector_close(struct elasticsearch_connector *connector); +void elasticsearch_genid(char *id, size_t size); +bool elasticsearch_newindex(struct elasticsearch_connector *connector, + const char *index, json_t *data); +bool elasticsearch_formattimestamp(const struct time *time, + char *timestr, size_t size); +bool elasticsearch_insert(struct elasticsearch_connector *connector, + const char *index, const char *type, const char *id, json_t *doc); +bool elasticsearch_update(struct elasticsearch_connector *connector, + const char *index, const char *type, const char *id, json_t *doc); + +#endif /* _ELASTICSEARCH_H_ */ diff --git a/modules/misc/elasticsearch/json.c b/modules/misc/elasticsearch/json.c new file mode 100644 index 00000000..2751a510 --- /dev/null +++ b/modules/misc/elasticsearch/json.c @@ -0,0 +1,98 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "json.h" + +#include +#include + +#include +#include + + +static json_t *lua_element_to_json(struct lua_State *L, int index) +{ + json_t *ret = NULL; + const int val_type = lua_type(L, index); + const int h = lua_gettop(L); + + lua_pushvalue(L, index); + + switch (val_type) { + case LUA_TBOOLEAN: { + const int int_val = lua_toboolean(L, -1); + ret = int_val ? json_true() : json_false(); + if (!ret) error("json boolean conversion error"); + break; + } + case LUA_TSTRING: + case LUA_TUSERDATA: { + const char *str_val = lua_converttostring(L, -1, NULL); + if (!str_val) { + error("cannot convert value to string"); + break; + } + + ret = json_string(str_val); + if (!ret) error("json string conversion error"); + break; + } + case LUA_TNUMBER: { + lua_Number num_val = lua_tonumber(L, -1); + ret = json_real(num_val); + if (!ret) error("json number conversion error"); + break; + } + case LUA_TTABLE: { + ret = json_object(); + if (!ret) { + error("json table conversion error"); + break; + } + + lua_pushnil(L); + while (lua_next(L, -2) != 0) { + size_t len; + const char *str_val = lua_tolstring(L, -2, &len); + json_t *val = lua_element_to_json(L, -1); + if (!val) { + json_decref(ret); + ret = NULL; + break; + } + + if (json_object_set(ret, str_val, val) != 0) { + error("json table conversion error"); + json_decref(val); + json_decref(ret); + ret = NULL; + break; + } + + lua_pop(L, 1); + } + + break; + } + case LUA_TFUNCTION: { + error("function cannot be converted to json"); + break; + } + case LUA_TNIL: { + ret = json_null(); + if (!ret) error("json nil conversion error"); + break; + } + default: + error("invalid value type (%s)", lua_typename(L, val_type)); + } + + lua_settop(L, h); + return ret; +} + +json_t *lua2json(struct lua_State *L, int index) +{ + return lua_element_to_json(L, index); +} diff --git a/modules/misc/elasticsearch/json.h b/modules/misc/elasticsearch/json.h new file mode 100644 index 00000000..50c09cbb --- /dev/null +++ 
b/modules/misc/elasticsearch/json.h @@ -0,0 +1,11 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include + +#include + +struct lua_State; + +json_t *lua2json(struct lua_State *L, int index); diff --git a/modules/misc/elasticsearch/json.si b/modules/misc/elasticsearch/json.si new file mode 100644 index 00000000..3dcdcfef --- /dev/null +++ b/modules/misc/elasticsearch/json.si @@ -0,0 +1,13 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +%{ +#include "json.h" +%} + +%typemap(in) json_t * +%{ + $1 = lua2json(L, $input); + if (!$1) SWIG_fail; +%} diff --git a/modules/misc/elasticsearch/main.c b/modules/misc/elasticsearch/main.c new file mode 100644 index 00000000..227d55e0 --- /dev/null +++ b/modules/misc/elasticsearch/main.c @@ -0,0 +1,29 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include +#include +#include +#include + +#include + + +static int init(struct parameters *args) +{ + return 0; +} + +static void cleanup() +{ +} + +struct module HAKA_MODULE = { + type: MODULE_EXTENSION, + name: "ElasticSearch connector", + description: "Insert and query an eleastic search server", + api_version: HAKA_API_VERSION, + init: init, + cleanup: cleanup +}; diff --git a/modules/misc/geoip/CMakeLists.txt b/modules/misc/geoip/CMakeLists.txt new file mode 100644 index 00000000..5e6b4c1b --- /dev/null +++ b/modules/misc/geoip/CMakeLists.txt @@ -0,0 +1,30 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. 
If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +find_package(LibGeoIP) + +if(LIBGEOIP_FOUND) + INCLUDE_MODULE(ipv4 protocol) + + swig_process(geoipswig lua geoip.i) + + add_library(geoip SHARED + main.c + geoip.c + ${SWIG_geoipswig_FILES} + ) + + include_directories(${LIBGEOIP_INCLUDE_DIR}) + target_link_libraries(geoip LINK_PRIVATE ${LIBGEOIP_LIBRARY}) + + SWIG_FIX_ENTRYPOINT(geoip misc) + + INCLUDE_MODULE(geoip ipv4 protocol) + + INSTALL_MODULE(geoip misc) + + add_subdirectory(test) +else() + message(STATUS "Not building module geoip (missing libraries)") +endif() diff --git a/modules/misc/geoip/doc/geoip.rst b/modules/misc/geoip/doc/geoip.rst new file mode 100644 index 00000000..db8a6d46 --- /dev/null +++ b/modules/misc/geoip/doc/geoip.rst @@ -0,0 +1,52 @@ +.. This Source Code Form is subject to the terms of the Mozilla Public +.. License, v. 2.0. If a copy of the MPL was not distributed with this +.. file, You can obtain one at http://mozilla.org/MPL/2.0/. + +.. highlightlang:: lua + +Geoip +===== + +.. haka:module:: geoip + +GeoIP utility module. + +**Usage:** + +:: + + local geoip = require('misc/geoip') + +API +--- + +.. haka:function:: open(file) -> geoip_handle + + + :param file: GeoIP data file. + :ptype file: string + + Load GeoIP data file. + +.. haka:class:: GeoIPHandle + + .. haka:function:: GeoIPHandle:country(ip) -> country_code + + :param ip: IPv4 address. + :ptype ip: :haka:class:`ipv4.addr` + :return country_code: Result country code or nil if not found. + :rtype country_code: string + + Query the country code for an IP address. 
+ +Example +------- + +:: + + local ipv4 = require('protocol/ipv4') + local geoip_module = require('misc/geoip') + + local geoip = geoip_module.open('/usr/share/GeoIP/GeoIP.dat') + print(geoip:country(ipv4.addr("8.8.8.8"))) + diff --git a/modules/misc/geoip/geoip.c b/modules/misc/geoip/geoip.c new file mode 100644 index 00000000..5299ed76 --- /dev/null +++ b/modules/misc/geoip/geoip.c @@ -0,0 +1,42 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "haka/geoip.h" + +#include + +#include + + +struct geoip_handle *geoip_initialize(const char *database) +{ + GeoIP *geoip_handle = GeoIP_open(database, GEOIP_MEMORY_CACHE); + if (!geoip_handle) { + error("cannot initialize geoip"); + return NULL; + } + + GeoIP_set_charset(geoip_handle, GEOIP_CHARSET_UTF8); + return (struct geoip_handle *)geoip_handle; +} + +void geoip_destroy(struct geoip_handle *geoip) +{ + GeoIP_delete((GeoIP *)geoip); +} + +bool geoip_lookup_country(struct geoip_handle *geoip, ipv4addr addr, + char country_code[3]) +{ + const char *returnedCountry; + + assert(geoip); + assert(country_code); + + returnedCountry = GeoIP_country_code_by_ipnum((GeoIP *)geoip, addr); + if (!returnedCountry) return false; + + memcpy(country_code, returnedCountry, 3); + return true; +} diff --git a/modules/misc/geoip/geoip.i b/modules/misc/geoip/geoip.i new file mode 100644 index 00000000..d6466a4b --- /dev/null +++ b/modules/misc/geoip/geoip.i @@ -0,0 +1,38 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +%module geoip + +%include "haka/lua/swig.si" +%include "haka/lua/object.si" +%include "haka/lua/ipv4-addr.si" + +%{ +#include "haka/geoip.h" +%} + +%nodefaultctor; +%nodefaultdtor; + +struct geoip_handle { + %extend{ + ~geoip_handle() { + geoip_destroy($self); + } + + const char *country(struct ipv4_addr *addr) { + static char country_code[3]; + + if (!geoip_lookup_country($self, addr->addr, country_code)) { + return NULL; + } + + return country_code; + } + } +}; + +%rename(open) geoip_initialize; +%newobject geoip_initialize; +struct geoip_handle *geoip_initialize(const char *database); diff --git a/modules/misc/geoip/haka/geoip.h b/modules/misc/geoip/haka/geoip.h new file mode 100644 index 00000000..cc0c0296 --- /dev/null +++ b/modules/misc/geoip/haka/geoip.h @@ -0,0 +1,21 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef _GEOIP_H_ +#define _GEOIP_H_ + +#include +#include + +#include + +struct geoip_handle; + +struct geoip_handle *geoip_initialize(const char *database); +void geoip_destroy(struct geoip_handle *geoip); + +bool geoip_lookup_country(struct geoip_handle *geoip, ipv4addr addr, + char country_code[3]); + +#endif /* _GEOIP_H_ */ diff --git a/modules/misc/geoip/main.c b/modules/misc/geoip/main.c new file mode 100644 index 00000000..eb82cce7 --- /dev/null +++ b/modules/misc/geoip/main.c @@ -0,0 +1,27 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include +#include +#include +#include + + +static int init(struct parameters *args) +{ + return 0; +} + +static void cleanup() +{ +} + +struct module HAKA_MODULE = { + type: MODULE_EXTENSION, + name: "GeoIP lookup utility", + description: "Query the geoip database on ip addresses", + api_version: HAKA_API_VERSION, + init: init, + cleanup: cleanup +}; diff --git a/modules/misc/geoip/test/CMakeLists.txt b/modules/misc/geoip/test/CMakeLists.txt new file mode 100644 index 00000000..17e80047 --- /dev/null +++ b/modules/misc/geoip/test/CMakeLists.txt @@ -0,0 +1,8 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +# Tests +include(TestUnitLua) + +TEST_UNIT_LUA(MODULE geoip NAME country FILES country.lua) diff --git a/modules/misc/geoip/test/country.lua b/modules/misc/geoip/test/country.lua new file mode 100644 index 00000000..81d4a944 --- /dev/null +++ b/modules/misc/geoip/test/country.lua @@ -0,0 +1,16 @@ +-- This Source Code Form is subject to the terms of the Mozilla Public +-- License, v. 2.0. If a copy of the MPL was not distributed with this +-- file, You can obtain one at http://mozilla.org/MPL/2.0/. + +local ipv4 = require('protocol/ipv4') +local geoip = require('misc/geoip') + +TestGeoipCountry = {} + +function TestGeoipCountry:test_country_check_no_error() + local db = geoip.open('/usr/share/GeoIP/GeoIP.dat') + + assertEquals(db:country(ipv4.addr('8.8.8.8')), "US") +end + +addTestSuite('TestGeoipCountry') diff --git a/modules/packet/benchmark/CMakeLists.txt b/modules/packet/benchmark/CMakeLists.txt new file mode 100644 index 00000000..92a28ffd --- /dev/null +++ b/modules/packet/benchmark/CMakeLists.txt @@ -0,0 +1,16 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. 
If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +find_package(PCAP REQUIRED) +if(PCAP_FOUND) + INCLUDE_MODULE(pcap packet) + + add_library(benchmark MODULE main.c) + + include_directories(${PCAP_INCLUDE_DIR}) + target_link_libraries(benchmark ${PCAP_LIBRARY}) + + DEPENDS_MODULE(benchmark packet-pcap packet) + INSTALL_MODULE(benchmark packet) +endif() diff --git a/modules/packet/benchmark/main.c b/modules/packet/benchmark/main.c new file mode 100644 index 00000000..a82c77cf --- /dev/null +++ b/modules/packet/benchmark/main.c @@ -0,0 +1,442 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MODULE "benchmark" + +#define PROGRESS_DELAY 5 /* 5 seconds */ +#define MEBI 1048576.f + +struct pcap_packet { + struct packet core_packet; + struct list list; + struct time timestamp; + struct packet_module_state *state; + uint64 id; + bool captured; + int protocol; +}; + +struct packet_module_state { + struct pcap_capture pd; + uint64 packet_id; + struct pcap_packet *received_head; + struct pcap_packet *current; + struct pcap_packet *received_tail; + int repeated; + size_t size; + bool started; + struct time start; + struct time end; +}; + +/* Init parameters */ +static char *input_file; +static bool passthrough = true; +static int repeat = 1; +static mutex_t stats_lock = MUTEX_INIT; +static struct time start = INVALID_TIME; +static struct time end = INVALID_TIME; +static size_t size; + +static void cleanup() +{ + struct time difftime; + double duration; + double bandwidth; + + time_diff(&difftime, &end, &start); + duration = time_sec(&difftime); + bandwidth = size * 
8 / duration / MEBI; + + messagef(HAKA_LOG_INFO, MODULE, + "processing %zd bytes took %lld.%.9u seconds being %02f Mib/s", + size, (int64)difftime.secs, difftime.nsecs, bandwidth); + + free(input_file); +} + +static int init(struct parameters *args) +{ + const char *input; + + if ((input = parameters_get_string(args, "file", NULL))) { + input_file = strdup(input); + } + else { + messagef(HAKA_LOG_ERROR, MODULE, "missing input parameter"); + cleanup(); + return 1; + } + + passthrough = parameters_get_boolean(args, "pass-through", true); + repeat = parameters_get_integer(args, "repeat", 1); + + return 0; +} + +static bool multi_threaded() +{ + return true; +} + +static bool pass_through() +{ + return passthrough; +} + +static void cleanup_state(struct packet_module_state *state) +{ + mutex_lock(&stats_lock); + if (!time_isvalid(&start) || time_cmp(&state->start, &start) < 0) { + start = state->start; + } + if (!time_isvalid(&end) || time_cmp(&state->end, &end) > 0) { + end = state->end; + } + size += state->size * state->repeated; + mutex_unlock(&stats_lock); + + free(state); +} + +static bool load_packet(struct packet_module_state *state) +{ + int ret; + struct pcap_pkthdr *header; + const u_char *p; + struct vbuffer data; + struct vbuffer_sub sub; + size_t data_offset; + + ret = pcap_next_ex(state->pd.pd, &header, &p); + if (ret == -1) { + messagef(HAKA_LOG_ERROR, MODULE, "%s", pcap_geterr(state->pd.pd)); + return 1; + } + else if (ret == -2) { + /* end of pcap file */ + return 1; + } + else if (ret == 0) { + /* Timeout expired. 
*/ + return 0; + } + else if (header->caplen == 0 || + header->len < header->caplen) { + messagef(HAKA_LOG_ERROR, MODULE, "skipping malformed packet %llu", ++state->packet_id); + return 0; + } + else { + struct pcap_packet *packet = malloc(sizeof(struct pcap_packet)); + if (!packet) { + return ENOMEM; + } + + memset(packet, 0, sizeof(struct pcap_packet)); + + if (!vbuffer_create_from(&data, (char *)p, header->caplen)) { + free(packet); + return ENOMEM; + } + + vbuffer_setwritable(&data, !passthrough); + + /* fill packet data structure */ + packet->state = state; + packet->captured = true; + packet->id = ++state->packet_id; + packet->timestamp.secs = header->ts.tv_sec; + packet->timestamp.nsecs = header->ts.tv_usec*1000; + + if (header->caplen < header->len) + messagef(HAKA_LOG_WARNING, MODULE, "packet truncated"); + + packet->protocol = get_protocol(state->pd.link_type, &data, &data_offset); + + vbuffer_sub_create(&sub, &data, data_offset, ALL); + ret = vbuffer_select(&sub, &packet->core_packet.payload, NULL); + vbuffer_release(&data); + if (!ret) { + messagef(HAKA_LOG_ERROR, MODULE, "malformed packet %llu", packet->id); + free(packet); + return ENOMEM; + } + + state->size += header->caplen - data_offset; + + /* Finally insert packet in list */ + list_init(packet); + if (state->received_head) { + list_insert_after(packet, state->received_tail, &state->received_head, &state->received_tail); + } else { + state->received_head = packet; + state->current = packet; + } + state->received_tail = packet; + + return 0; + } +} + +static bool load_pcap(struct packet_module_state *state, const char *input) +{ + size_t cur; + char errbuf[PCAP_ERRBUF_SIZE]; + bzero(errbuf, PCAP_ERRBUF_SIZE); + + assert(input); + + messagef(HAKA_LOG_INFO, MODULE, "opening file '%s'", input); + + state->pd.pd = pcap_open_offline(input, errbuf); + + if (!state->pd.pd) { + messagef(HAKA_LOG_ERROR, MODULE, "%s", errbuf); + return false; + } + + state->pd.file = pcap_file(state->pd.pd); + 
assert(state->pd.file); + + cur = ftell(state->pd.file); + fseek(state->pd.file, 0L, SEEK_END); + state->pd.file_size = ftell(state->pd.file); + fseek(state->pd.file, cur, SEEK_SET); + + /* Determine the datalink layer type. */ + if ((state->pd.link_type = pcap_datalink(state->pd.pd)) < 0) + { + messagef(HAKA_LOG_ERROR, MODULE, "%s", pcap_geterr(state->pd.pd)); + pcap_close(state->pd.pd); + return false; + } + + /* Check for supported datalink layer. */ + switch (state->pd.link_type) + { + case DLT_EN10MB: + case DLT_NULL: + case DLT_LINUX_SLL: + case DLT_IPV4: + case DLT_RAW: + break; + + case DLT_SLIP: + case DLT_PPP: + default: + messagef(HAKA_LOG_ERROR, MODULE, "%s", "unsupported data link"); + pcap_close(state->pd.pd); + return false; + } + + messagef(HAKA_LOG_INFO, MODULE, "loading packet in memory from '%s'", input); + while(load_packet(state) == 0); + messagef(HAKA_LOG_INFO, MODULE, "loaded %zd bytes in memory", state->size); + + pcap_close(state->pd.pd); + + return true; +} + +static struct packet_module_state *init_state(int thread_id) +{ + struct packet_module_state *state; + + state = malloc(sizeof(struct packet_module_state)); + if (!state) { + error("memory error"); + return NULL; + } + + state->packet_id = 0; + state->repeated = 0; + state->size = 0; + state->started = false; + + bzero(state, sizeof(struct packet_module_state)); + + if (!load_pcap(state, input_file)) { + cleanup_state(state); + return NULL; + } + + return state; +} + +static int packet_do_receive(struct packet_module_state *state, struct packet **pkt) +{ + if (!state->started) { + time_gettimestamp(&state->start); + state->started = true; + } + + if (!state->current) { + state->repeated++; + if (state->repeated < repeat) { + const float percent = state->repeated * 100.f / repeat; + struct time time, difftime; + time_gettimestamp(&time); + + if (time_isvalid(&state->pd.last_progress)) { + time_diff(&difftime, &time, &state->pd.last_progress); + + if (difftime.secs >= PROGRESS_DELAY) { 
+ state->pd.last_progress = time; + if (percent > 0) { + messagef(HAKA_LOG_INFO, MODULE, "progress %.2f %%", percent); + } + } + } else { + state->pd.last_progress = time; + } + + state->current = state->received_head; + } else { + /* No more packet */ + return 1; + } + } + + *pkt = (struct packet *)state->current; + state->current = list_next(state->current); + return 0; + +} + +static void packet_verdict(struct packet *orig_pkt, filter_result result) +{ + struct pcap_packet *pkt = (struct pcap_packet*)orig_pkt; + time_gettimestamp(&pkt->state->end); +} + +static const char *packet_get_dissector(struct packet *orig_pkt) +{ + struct pcap_packet *pkt = (struct pcap_packet*)orig_pkt; + + switch (pkt->protocol) { + case ETH_P_IP: + return "ipv4"; + + case -1: + messagef(HAKA_LOG_ERROR, MODULE, "malformed packet %llu", pkt->id); + + default: + return NULL; + } +} + +static uint64 packet_get_id(struct packet *orig_pkt) +{ + struct pcap_packet *pkt = (struct pcap_packet*)orig_pkt; + return pkt->id; +} + +static void packet_do_release(struct packet *orig_pkt) +{ + /* Nothing to do as packet are in memory and will be released on state + * cleanup */ +} + +static enum packet_status packet_getstate(struct packet *orig_pkt) +{ + struct pcap_packet *pkt = (struct pcap_packet*)orig_pkt; + + if (pkt->captured) + return STATUS_NORMAL; + else + return STATUS_FORGED; +} + +static struct packet *new_packet(struct packet_module_state *state, size_t size) +{ + struct pcap_packet *packet = malloc(sizeof(struct pcap_packet)); + if (!packet) { + error("Memory error"); + return NULL; + } + + memset(packet, 0, sizeof(struct pcap_packet)); + + list_init(packet); + packet->state = state; + packet->captured = false; + time_gettimestamp(&packet->timestamp); + packet->id = 0; + + if (!vbuffer_create_new(&packet->core_packet.payload, size, true)) { + assert(check_error()); + free(packet); + return NULL; + } + + return (struct packet *)packet; +} + +static bool send_packet(struct packet 
*orig_pkt) +{ + error("sending is not supported"); + return false; +} + +static size_t get_mtu(struct packet *pkt) +{ + return 1500; +} + +static const struct time *get_timestamp(struct packet *orig_pkt) +{ + struct pcap_packet *pkt = (struct pcap_packet*)orig_pkt; + return &pkt->timestamp; +} + +static bool is_realtime() +{ + return false; +} + +struct packet_module HAKA_MODULE = { + module: { + type: MODULE_PACKET, + name: "Benchmark Module", + description: "Packet capture from memory module", + api_version: HAKA_API_VERSION, + init: init, + cleanup: cleanup + }, + multi_threaded: multi_threaded, + pass_through: pass_through, + is_realtime: is_realtime, + init_state: init_state, + cleanup_state: cleanup_state, + receive: packet_do_receive, + verdict: packet_verdict, + get_id: packet_get_id, + get_dissector: packet_get_dissector, + release_packet: packet_do_release, + packet_getstate: packet_getstate, + new_packet: new_packet, + send_packet: send_packet, + get_mtu: get_mtu, + get_timestamp: get_timestamp +}; diff --git a/modules/packet/nfqueue/CMakeLists.txt b/modules/packet/nfqueue/CMakeLists.txt index b8adf411..4612c382 100644 --- a/modules/packet/nfqueue/CMakeLists.txt +++ b/modules/packet/nfqueue/CMakeLists.txt @@ -35,10 +35,12 @@ int main() include_directories(${NETFILTERQUEUE_INCLUDE_DIR} ${PCAP_INCLUDE_DIR}) target_link_libraries(packet-nfqueue ${NETFILTERQUEUE_LIBRARIES} ${PCAP_LIBRARY}) - + if(NFQ_GET_PAYLOAD_UNSIGNED_CHAR) set_target_properties(packet-nfqueue PROPERTIES COMPILE_DEFINITIONS NFQ_GET_PAYLOAD_UNSIGNED_CHAR) endif(NFQ_GET_PAYLOAD_UNSIGNED_CHAR) INSTALL_MODULE(packet-nfqueue packet) +else() + message(STATUS "Not building module nfqueue (missing libraries)") endif() diff --git a/modules/packet/nfqueue/config.h b/modules/packet/nfqueue/config.h index e016ce51..bfbbe2d0 100644 --- a/modules/packet/nfqueue/config.h +++ b/modules/packet/nfqueue/config.h @@ -5,6 +5,6 @@ #ifndef _CONFIG_H #define _CONFIG_H -#define MODULE_NAME L"nfqueue" +#define 
MODULE_NAME "nfqueue" #endif /* _CONFIG_H */ diff --git a/modules/packet/nfqueue/iptables.c b/modules/packet/nfqueue/iptables.c index de3dca60..6b707084 100644 --- a/modules/packet/nfqueue/iptables.c +++ b/modules/packet/nfqueue/iptables.c @@ -27,13 +27,13 @@ static pid_t fork_with_pipes(int pipefd_in[2], int pipefd_out[2]) pid_t child_pid; if (pipe(pipefd_in) < 0) { - messagef(HAKA_LOG_ERROR, MODULE_NAME, L"%s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, MODULE_NAME, "%s", errno_error(errno)); return -1; } if (pipefd_out) { if (pipe(pipefd_out) < 0) { - messagef(HAKA_LOG_ERROR, MODULE_NAME, L"%s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, MODULE_NAME, "%s", errno_error(errno)); close(pipefd_in[0]); close(pipefd_in[1]); return -1; @@ -42,7 +42,7 @@ static pid_t fork_with_pipes(int pipefd_in[2], int pipefd_out[2]) child_pid = fork(); if (child_pid < 0) { - messagef(HAKA_LOG_ERROR, MODULE_NAME, L"%s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, MODULE_NAME, "%s", errno_error(errno)); close(pipefd_in[0]); close(pipefd_in[1]); @@ -102,7 +102,7 @@ int apply_iptables(const char *table, const char *conf, bool noflush) ssize_t line_size; FILE *output = fdopen(pipefd_out[0], "r"); if (!output) { - messagef(HAKA_LOG_ERROR, MODULE_NAME, L"iptables-restore: %s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, MODULE_NAME, "iptables-restore: %s", errno_error(errno)); return errno; } @@ -121,7 +121,7 @@ int apply_iptables(const char *table, const char *conf, bool noflush) while ((line_size = getline(&buffer, &buffer_size, output)) >= 0) { if (line_size > 1) { buffer[line_size-1] = '\0'; - messagef(HAKA_LOG_INFO, MODULE_NAME, L"iptables-restore: %s", buffer); + messagef(HAKA_LOG_INFO, MODULE_NAME, "iptables-restore: %s", buffer); } } @@ -224,7 +224,7 @@ int save_iptables(const char *table, char **conf, bool all_targets) int max_fd, fd_count; if (!input || !err) { - messagef(HAKA_LOG_ERROR, MODULE_NAME, L"iptables-save: %s", errno_error(errno)); + 
messagef(HAKA_LOG_ERROR, MODULE_NAME, "iptables-save: %s", errno_error(errno)); fclose(input); fclose(err); return errno; @@ -248,7 +248,7 @@ int save_iptables(const char *table, char **conf, bool all_targets) rc = select(max_fd+1, &curfds, NULL, NULL, NULL); if (rc < 0) { - messagef(HAKA_LOG_ERROR, MODULE_NAME, L"iptables-save: %s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, MODULE_NAME, "iptables-save: %s", errno_error(errno)); free(buffer); fclose(input); fclose(err); @@ -259,7 +259,7 @@ int save_iptables(const char *table, char **conf, bool all_targets) line_size = getline(&buffer, &buffer_size, err); if (line_size > 1) { buffer[line_size-1] = '\0'; - messagef(HAKA_LOG_INFO, MODULE_NAME, L"iptables-save: %s", buffer); + messagef(HAKA_LOG_INFO, MODULE_NAME, "iptables-save: %s", buffer); } else { FD_CLR(pipefd_err[1], &fds); diff --git a/modules/packet/nfqueue/main.c b/modules/packet/nfqueue/main.c index b2ec0e95..2ceca803 100644 --- a/modules/packet/nfqueue/main.c +++ b/modules/packet/nfqueue/main.c @@ -226,17 +226,17 @@ static int packet_callback(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg, packet_hdr = nfq_get_msg_packet_hdr(nfad); if (!packet_hdr) { - message(HAKA_LOG_ERROR, MODULE_NAME, L"unable to get packet header"); + message(HAKA_LOG_ERROR, MODULE_NAME, "unable to get packet header"); return 0; } packet_len = nfq_get_payload(nfad, &packet_data); if (packet_len > PACKET_BUFFER_SIZE) { - message(HAKA_LOG_WARNING, MODULE_NAME, L"received packet is too large"); + message(HAKA_LOG_WARNING, MODULE_NAME, "received packet is too large"); return 0; } else if (packet_len < 0) { - message(HAKA_LOG_ERROR, MODULE_NAME, L"unable to get packet payload"); + message(HAKA_LOG_ERROR, MODULE_NAME, "unable to get packet payload"); return 0; } @@ -284,13 +284,13 @@ static int open_send_socket(bool mark) fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW); if (fd < 0) { - messagef(HAKA_LOG_ERROR, MODULE_NAME, L"cannot open send socket: %s", errno_error(errno)); + 
messagef(HAKA_LOG_ERROR, MODULE_NAME, "cannot open send socket: %s", errno_error(errno)); return -1; } if (setsockopt(fd, IPPROTO_IP, IP_HDRINCL, &one, sizeof(one)) < 0) { close(fd); - messagef(HAKA_LOG_ERROR, MODULE_NAME, L"cannot setup send socket: %s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, MODULE_NAME, "cannot setup send socket: %s", errno_error(errno)); return -1; } @@ -298,7 +298,7 @@ static int open_send_socket(bool mark) one = 0xffff; if (setsockopt(fd, SOL_SOCKET, SO_MARK, &one, sizeof(one)) < 0) { close(fd); - messagef(HAKA_LOG_ERROR, MODULE_NAME, L"cannot setup send socket: %s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, MODULE_NAME, "cannot setup send socket: %s", errno_error(errno)); return -1; } } @@ -316,7 +316,7 @@ static bool socket_send_packet(int fd, const void *pkt, size_t size) sin.sin_addr.s_addr = iphdr->daddr; if (sendto(fd, pkt, size, 0, (struct sockaddr*)&sin, sizeof(sin)) == (size_t)-1) { - error(L"send failed: %s", errno_error(errno)); + error("send failed: %s", errno_error(errno)); return false; } @@ -340,20 +340,20 @@ static struct packet_module_state *init_state(int thread_id) /* Setup nfqueue connection */ state->handle = nfq_open(); if (!state->handle) { - message(HAKA_LOG_ERROR, MODULE_NAME, L"unable to open nfqueue handle"); + message(HAKA_LOG_ERROR, MODULE_NAME, "unable to open nfqueue handle"); cleanup_state(state); return NULL; } for (i=0; ihandle, proto_family[i]) < 0) { - message(HAKA_LOG_ERROR, MODULE_NAME, L"cannot unbind queue"); + message(HAKA_LOG_ERROR, MODULE_NAME, "cannot unbind queue"); cleanup_state(state); return NULL; } if (nfq_bind_pf(state->handle, proto_family[i]) < 0) { - message(HAKA_LOG_ERROR, MODULE_NAME, L"cannot bind queue"); + message(HAKA_LOG_ERROR, MODULE_NAME, "cannot bind queue"); cleanup_state(state); return NULL; } @@ -374,14 +374,14 @@ static struct packet_module_state *init_state(int thread_id) state->queue = nfq_create_queue(state->handle, thread_id, &packet_callback, state); if 
(!state->queue) { - message(HAKA_LOG_ERROR, MODULE_NAME, L"cannot create queue"); + message(HAKA_LOG_ERROR, MODULE_NAME, "cannot create queue"); cleanup_state(state); return NULL; } if (nfq_set_mode(state->queue, NFQNL_COPY_PACKET, PACKET_BUFFER_SIZE) < 0) { - message(HAKA_LOG_ERROR, MODULE_NAME, L"cannot set mode to copy packet"); + message(HAKA_LOG_ERROR, MODULE_NAME, "cannot set mode to copy packet"); cleanup_state(state); return NULL; } @@ -390,7 +390,7 @@ static struct packet_module_state *init_state(int thread_id) /* Change nfq queue len and netfilter receive size */ if (nfq_set_queue_maxlen(state->queue, nfqueue_len) < 0) { - message(HAKA_LOG_WARNING, MODULE_NAME, L"cannot change netfilter queue len"); + message(HAKA_LOG_WARNING, MODULE_NAME, "cannot change netfilter queue len"); } nfnl_rcvbufsiz(nfq_nfnlh(state->handle), nfqueue_len * 1500); @@ -403,13 +403,13 @@ static int open_pcap(struct pcap_dump *pcap, const char *file) if (file) { pcap->pd = pcap_open_dead(DLT_IPV4, PACKET_RECV_SIZE); if (!pcap->pd) { - message(HAKA_LOG_ERROR, MODULE_NAME, L"cannot setup pcap sink"); + message(HAKA_LOG_ERROR, MODULE_NAME, "cannot setup pcap sink"); return 1; } pcap->pf = pcap_dump_open(pcap->pd, file); if (!pcap->pf) { - message(HAKA_LOG_ERROR, MODULE_NAME, L"cannot setup pcap sink"); + message(HAKA_LOG_ERROR, MODULE_NAME, "cannot setup pcap sink"); return 1; } } @@ -430,7 +430,7 @@ static void restore_iptables() { if (iptables_saved) { if (apply_iptables("raw", iptables_saved, !iptables_save_need_flush) != 0) { - message(HAKA_LOG_ERROR, MODULE_NAME, L"cannot restore iptables rules"); + message(HAKA_LOG_ERROR, MODULE_NAME, "cannot restore iptables rules"); } } } @@ -475,7 +475,7 @@ static int init(struct parameters *args) /* Setup iptables rules */ iptables_save_need_flush = install; if (save_iptables("raw", &iptables_saved, install)) { - message(HAKA_LOG_ERROR, MODULE_NAME, L"cannot save iptables rules"); + message(HAKA_LOG_ERROR, MODULE_NAME, "cannot save iptables 
rules"); cleanup(); return 1; } @@ -486,7 +486,7 @@ static int init(struct parameters *args) const char *iter; const char *interfaces = parameters_get_string(args, "interfaces", NULL); if (!interfaces || strlen(interfaces) == 0) { - message(HAKA_LOG_ERROR, MODULE_NAME, L"no interfaces selected"); + message(HAKA_LOG_ERROR, MODULE_NAME, "no interfaces selected"); cleanup(); return 1; } @@ -498,7 +498,7 @@ static int init(struct parameters *args) interfaces_buf = strdup(interfaces); if (!interfaces_buf) { - error(L"memory error"); + error("memory error"); cleanup(); return 1; } @@ -508,13 +508,13 @@ static int init(struct parameters *args) ifaces = malloc((sizeof(char *) * (count + 1))); if (!ifaces) { - message(HAKA_LOG_ERROR, MODULE_NAME, L"memory error"); + message(HAKA_LOG_ERROR, MODULE_NAME, "memory error"); free(interfaces_buf); cleanup(); return 1; } - messagef(HAKA_LOG_INFO, MODULE_NAME, L"installing iptables rules for device(s) %s", interfaces_buf); + messagef(HAKA_LOG_INFO, MODULE_NAME, "installing iptables rules for device(s) %s", interfaces_buf); { int index = 0; @@ -522,7 +522,7 @@ static int init(struct parameters *args) struct ifaddrs *ifa; if (getifaddrs(&ifa)) { - messagef(HAKA_LOG_ERROR, MODULE_NAME, L"%s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, MODULE_NAME, "%s", errno_error(errno)); free(interfaces_buf); cleanup(); return 1; @@ -533,7 +533,7 @@ static int init(struct parameters *args) assert(token != NULL); if (!is_iface_valid(ifa, token)) { - messagef(HAKA_LOG_ERROR, MODULE_NAME, L"'%s' is not a valid network interface", token); + messagef(HAKA_LOG_ERROR, MODULE_NAME, "'%s' is not a valid network interface", token); free(interfaces_buf); cleanup(); return 1; @@ -547,12 +547,12 @@ static int init(struct parameters *args) } if (!install) { - message(HAKA_LOG_WARNING, MODULE_NAME, L"iptables setup rely on user rules"); + message(HAKA_LOG_WARNING, MODULE_NAME, "iptables setup rely on user rules"); } new_iptables_config = 
iptables_config(ifaces, thread_count, install); if (!new_iptables_config) { - message(HAKA_LOG_ERROR, MODULE_NAME, L"cannot generate iptables rules"); + message(HAKA_LOG_ERROR, MODULE_NAME, "cannot generate iptables rules"); free(ifaces); free(interfaces_buf); cleanup(); @@ -564,7 +564,7 @@ static int init(struct parameters *args) interfaces_buf = NULL; if (apply_iptables("raw", new_iptables_config, !install)) { - message(HAKA_LOG_ERROR, MODULE_NAME, L"cannot setup iptables rules"); + message(HAKA_LOG_ERROR, MODULE_NAME, "cannot setup iptables rules"); free(new_iptables_config); cleanup(); return 1; @@ -578,12 +578,12 @@ static int init(struct parameters *args) file_in = parameters_get_string(args, "dump_input", NULL); file_out = parameters_get_string(args, "dump_output", NULL); if (!(file_in || file_out)) { - message(HAKA_LOG_WARNING, MODULE_NAME, L"no dump pcap files specified"); + message(HAKA_LOG_WARNING, MODULE_NAME, "no dump pcap files specified"); } else { pcap = malloc(sizeof(struct pcap_sinks)); if (!pcap) { - message(HAKA_LOG_ERROR, MODULE_NAME, L"memory error"); + message(HAKA_LOG_ERROR, MODULE_NAME, "memory error"); cleanup(); return 1; } @@ -591,11 +591,11 @@ static int init(struct parameters *args) if (file_in) { open_pcap(&pcap->in, file_in); - messagef(HAKA_LOG_INFO, MODULE_NAME, L"dumping received packets into '%s'", file_in); + messagef(HAKA_LOG_INFO, MODULE_NAME, "dumping received packets into '%s'", file_in); } if (file_out) { open_pcap(&pcap->out, file_out); - messagef(HAKA_LOG_INFO, MODULE_NAME, L"dumping emitted packets into '%s'", file_out); + messagef(HAKA_LOG_INFO, MODULE_NAME, "dumping emitted packets into '%s'", file_out); } } } @@ -648,7 +648,7 @@ static int packet_do_receive(struct packet_module_state *state, struct packet ** rv = select(max_fd+1, &read_set, NULL, NULL, NULL); if (rv <= 0) { if (rv == -1 && errno != EINTR) { - messagef(HAKA_LOG_ERROR, MODULE_NAME, L"packet reception failed, %s", errno_error(errno)); + 
messagef(HAKA_LOG_ERROR, MODULE_NAME, "packet reception failed, %s", errno_error(errno)); } return 0; } @@ -657,7 +657,7 @@ static int packet_do_receive(struct packet_module_state *state, struct packet ** rv = recv(state->fd, state->receive_buffer, sizeof(state->receive_buffer), 0); if (rv < 0) { if (errno != EINTR) { - messagef(HAKA_LOG_ERROR, MODULE_NAME, L"packet reception failed, %s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, MODULE_NAME, "packet reception failed, %s", errno_error(errno)); } return 0; } @@ -685,7 +685,7 @@ static int packet_do_receive(struct packet_module_state *state, struct packet ** } } else { - message(HAKA_LOG_ERROR, MODULE_NAME, L"packet processing failed"); + message(HAKA_LOG_ERROR, MODULE_NAME, "packet processing failed"); return 0; } } @@ -727,7 +727,7 @@ static void packet_verdict(struct packet *orig_pkt, filter_result result) case FILTER_ACCEPT: verdict = NF_ACCEPT; break; case FILTER_DROP: verdict = NF_DROP; break; default: - message(HAKA_LOG_DEBUG, MODULE_NAME, L"unknown verdict"); + message(HAKA_LOG_DEBUG, MODULE_NAME, "unknown verdict"); verdict = NF_DROP; break; } @@ -745,7 +745,7 @@ static void packet_verdict(struct packet *orig_pkt, filter_result result) } if (ret == -1) { - message(HAKA_LOG_ERROR, MODULE_NAME, L"packet verdict failed"); + message(HAKA_LOG_ERROR, MODULE_NAME, "packet verdict failed"); } vbuffer_clear(&pkt->core_packet.payload); @@ -794,7 +794,7 @@ static struct packet *new_packet(struct packet_module_state *state, size_t size) { struct nfqueue_packet *packet = malloc(sizeof(struct nfqueue_packet)); if (!packet) { - error(L"Memory error"); + error("Memory error"); return NULL; } @@ -855,7 +855,7 @@ struct packet_module HAKA_MODULE = { module: { type: MODULE_PACKET, name: MODULE_NAME, - description: L"Netfilter queue packet module", + description: "Netfilter queue packet module", api_version: HAKA_API_VERSION, init: init, cleanup: cleanup diff --git a/modules/packet/pcap/CMakeLists.txt 
b/modules/packet/pcap/CMakeLists.txt index 00f03c06..6bba0469 100644 --- a/modules/packet/pcap/CMakeLists.txt +++ b/modules/packet/pcap/CMakeLists.txt @@ -4,15 +4,11 @@ find_package(PCAP REQUIRED) if(PCAP_FOUND) - add_library(packet-pcap MODULE main.c) + add_library(packet-pcap SHARED main.c pcap.c) set_target_properties(packet-pcap PROPERTIES OUTPUT_NAME pcap) include_directories(${PCAP_INCLUDE_DIR}) target_link_libraries(packet-pcap ${PCAP_LIBRARY}) INSTALL_MODULE(packet-pcap packet) - - # Test - # include(TestModuleLoad) - # TEST_MODULE_LOAD(packet-pcap) endif() diff --git a/modules/packet/pcap/doc/module.rst b/modules/packet/pcap/doc/module.rst index 455054c4..470fac28 100644 --- a/modules/packet/pcap/doc/module.rst +++ b/modules/packet/pcap/doc/module.rst @@ -10,7 +10,8 @@ Description The module uses the `pcap` library to read packets from a pcap file or from a network interface. -.. note: +.. note:: + To be able to capture packets on a real interface, the process need to be launched with the proper permissions. @@ -32,6 +33,12 @@ Parameters # Capture on all interfaces # interfaces = "any" + .. warning:: + + If the module capture on multiple interfaces and is doing forwarding, + the packet will be received duplicated by haka. It will create problems + with the state-full connection tracking. + .. describe:: file Read packets from a pcap file. @@ -50,3 +57,7 @@ Parameters file = "/tmp/input.pcap" output = "/tmp/output.pcap" + +.. describe:: dump_input=`file` + + Save the received packets to the specified pcap file. diff --git a/modules/packet/pcap/haka/pcap.h b/modules/packet/pcap/haka/pcap.h new file mode 100644 index 00000000..e67c6125 --- /dev/null +++ b/modules/packet/pcap/haka/pcap.h @@ -0,0 +1,36 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef _HAKA_PCAP_TCP_H +#define _HAKA_PCAP_TCP_H + +/* snapshot length - a value of 65535 is sufficient to get all + * of the packet's data on most networks (from pcap man page) + */ +#define SNAPLEN 65535 + +struct pcap_capture { + pcap_t *pd; + int link_type; + FILE *file; + size_t file_size; + struct time last_progress; +}; + +/* + * Packet headers + */ + +struct linux_sll_header { + uint16 type; + uint16 arphdr_type; + uint16 link_layer_length; + uint64 link_layer_address; + uint16 protocol; +} PACKED; + +int get_link_type_offset(int link_type); +int get_protocol(int link_type, struct vbuffer *data, size_t *data_offset); + +#endif /* _HAKA_PCAP_TCP_H */ diff --git a/modules/packet/pcap/main.c b/modules/packet/pcap/main.c index 2db48b66..2ac28f88 100644 --- a/modules/packet/pcap/main.c +++ b/modules/packet/pcap/main.c @@ -2,15 +2,6 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ -#include -#include -#include -#include -#include -#include -#include -#include - #include #include #include @@ -21,34 +12,19 @@ #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include -/* snapshot length - a value of 65535 is sufficient to get all - * of the packet's data on most networks (from pcap man page) - */ -#define SNAPLEN 65535 - +#define MODULE_NAME "pcap" #define PROGRESS_DELAY 5 /* 5 seconds */ -struct pcap_packet; - -struct pcap_capture { - pcap_t *pd; - int link_type; - FILE *file; - size_t file_size; - struct time last_progress; -}; - -struct packet_module_state { - uint32 pd_count; - struct pcap_capture *pd; - pcap_dumper_t *pf; - uint64 packet_id; - int link_type; - struct pcap_packet *sent_head; - struct pcap_packet *sent_tail; -}; - struct pcap_packet { struct packet core_packet; struct list list; @@ -62,27 +38,25 @@ struct pcap_packet { bool captured; }; -/* - * Packet headers - */ - -struct linux_sll_header { - uint16 
type; - uint16 arphdr_type; - uint16 link_layer_length; - uint64 link_layer_address; - uint16 protocol; -} PACKED; - +struct packet_module_state { + uint32 pd_count; + struct pcap_capture *pd; + pcap_dumper_t *pin; + pcap_dumper_t *pout; + uint64 packet_id; + int link_type; + struct pcap_packet *sent_head; + struct pcap_packet *sent_tail; +}; /* Init parameters */ static int input_count; static char **inputs; static bool input_is_iface; -static char *output_file; +static char *output_dump_file; +static char *input_dump_file; static bool passthrough = true; - static void cleanup() { int i; @@ -92,12 +66,13 @@ static void cleanup() } free(inputs); - free(output_file); + free(output_dump_file); + free(input_dump_file); } static int init(struct parameters *args) { - const char *input, *output, *interfaces; + const char *input, *dump, *interfaces; interfaces = parameters_get_string(args, "interfaces", NULL); input = parameters_get_string(args, "file", NULL); @@ -116,7 +91,7 @@ static int init(struct parameters *args) interfaces_buf = strdup(interfaces); if (!interfaces_buf) { - error(L"memory error"); + error("memory error"); cleanup(); return 1; } @@ -125,7 +100,7 @@ static int init(struct parameters *args) inputs = malloc(sizeof(char *)*count); if (!inputs) { free(interfaces_buf); - error(L"memory error"); + error("memory error"); cleanup(); return 1; } @@ -149,14 +124,14 @@ static int init(struct parameters *args) input_count = 1; inputs = malloc(sizeof(char *)); if (!inputs) { - error(L"memory error"); + error("memory error"); cleanup(); return 1; } inputs[0] = strdup(input); if (!inputs[0]) { - error(L"memory error"); + error("memory error"); cleanup(); return 1; } @@ -164,13 +139,17 @@ static int init(struct parameters *args) input_is_iface = false; } else { - messagef(HAKA_LOG_ERROR, L"pcap", L"specifiy either a device or a pcap filename"); + messagef(HAKA_LOG_ERROR, MODULE_NAME, "specifiy either a device or a pcap filename"); cleanup(); return 1; } - if ((output 
= parameters_get_string(args, "output", NULL))) { - output_file = strdup(output); + if ((dump = parameters_get_string(args, "output", NULL))) { + output_dump_file = strdup(dump); + } + + if ((dump = parameters_get_string(args, "dump_input", NULL))) { + input_dump_file = strdup(dump); } passthrough = parameters_get_boolean(args, "pass-through", true); @@ -192,9 +171,14 @@ static void cleanup_state(struct packet_module_state *state) { int i; - if (state->pf) { - pcap_dump_close(state->pf); - state->pf = NULL; + if (state->pin) { + pcap_dump_close(state->pin); + state->pin = NULL; + } + + if (state->pout) { + pcap_dump_close(state->pout); + state->pout = NULL; } for (i=0; ipd_count; ++i) { @@ -216,15 +200,15 @@ static bool open_pcap(struct pcap_capture *pd, const char *input, bool isiface) assert(input); if (isiface) { - messagef(HAKA_LOG_INFO, L"pcap", L"listening on device %s", input); + messagef(HAKA_LOG_INFO, MODULE_NAME, "listening on device %s", input); pd->pd = pcap_open_live(input, SNAPLEN, 1, 0, errbuf); if (pd->pd && (strlen(errbuf) > 0)) { - messagef(HAKA_LOG_WARNING, L"pcap", L"%s", errbuf); + messagef(HAKA_LOG_WARNING, MODULE_NAME, "%s", errbuf); } } else { - messagef(HAKA_LOG_INFO, L"pcap", L"opening file '%s'", input); + messagef(HAKA_LOG_INFO, MODULE_NAME, "opening file '%s'", input); pd->pd = pcap_open_offline(input, errbuf); @@ -242,14 +226,14 @@ static bool open_pcap(struct pcap_capture *pd, const char *input, bool isiface) } if (!pd->pd) { - messagef(HAKA_LOG_ERROR, L"pcap", L"%s", errbuf); + messagef(HAKA_LOG_ERROR, MODULE_NAME, "%s", errbuf); return false; } /* Determine the datalink layer type. 
*/ if ((pd->link_type = pcap_datalink(pd->pd)) < 0) { - messagef(HAKA_LOG_ERROR, L"pcap", L"%s", pcap_geterr(pd->pd)); + messagef(HAKA_LOG_ERROR, MODULE_NAME, "%s", pcap_geterr(pd->pd)); return false; } @@ -266,13 +250,29 @@ static bool open_pcap(struct pcap_capture *pd, const char *input, bool isiface) case DLT_SLIP: case DLT_PPP: default: - messagef(HAKA_LOG_ERROR, L"pcap", L"%s", "unsupported data link"); + messagef(HAKA_LOG_ERROR, MODULE_NAME, "%s", "unsupported data link"); return false; } return true; } +static pcap_dumper_t *open_dump_file(struct packet_module_state *state, const char *filename) +{ + pcap_dumper_t *dump; + + if (!filename) return NULL; + + dump = pcap_dump_open(state->pd[0].pd, filename); + if (!dump) { + cleanup_state(state); + messagef(HAKA_LOG_ERROR, MODULE_NAME, "unable to dump on %s", filename); + return NULL; + } + + return dump; +} + static struct packet_module_state *init_state(int thread_id) { struct packet_module_state *state; @@ -282,7 +282,7 @@ static struct packet_module_state *init_state(int thread_id) state = malloc(sizeof(struct packet_module_state)); if (!state) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -290,7 +290,7 @@ static struct packet_module_state *init_state(int thread_id) state->pd = malloc(sizeof(struct pcap_capture)*input_count); if (!state->pd) { - error(L"memory error"); + error("memory error"); free(state); return NULL; } @@ -307,103 +307,19 @@ static struct packet_module_state *init_state(int thread_id) state->link_type = state->pd[0].link_type; - if (output_file) { - /* open pcap savefile */ - state->pf = pcap_dump_open(state->pd[0].pd, output_file); - if (!state->pf) { - cleanup_state(state); - messagef(HAKA_LOG_ERROR, L"pcap", L"unable to dump on %s", output_file); - return NULL; - } - } + state->pin = open_dump_file(state, input_dump_file); + state->pout = open_dump_file(state, output_dump_file); state->packet_id = 0; return state; } -static int get_link_type_offset(struct 
pcap_packet *pkt) -{ - size_t size; - - switch (pkt->link_type) - { - case DLT_LINUX_SLL: size = sizeof(struct linux_sll_header); break; - case DLT_EN10MB: size = sizeof(struct ethhdr); break; - case DLT_IPV4: - case DLT_RAW: size = 0; break; - case DLT_NULL: size = 4; break; - - default: assert(!"unsupported link type"); return -1; - } - - return size; -} - -static int get_protocol(struct pcap_packet *pkt, size_t *data_offset) -{ - size_t len, size; - struct vbuffer_sub sub; - const uint8* data = NULL; - - size = get_link_type_offset(pkt); - *data_offset = size; - - if (size > 0) { - vbuffer_sub_create(&sub, &pkt->data, 0, size); - data = vbuffer_sub_flatten(&sub, &len); - - if (len < *data_offset) { - messagef(HAKA_LOG_ERROR, L"pcap", L"malformed packet %d", pkt->id); - return -1; - } - - assert(data); - } - - switch (pkt->link_type) - { - case DLT_LINUX_SLL: - { - struct linux_sll_header *eh = (struct linux_sll_header *)data; - if (eh) return ntohs(eh->protocol); - else return 0; - } - break; - - case DLT_EN10MB: - { - struct ethhdr *eh = (struct ethhdr *)data; - if (eh) return ntohs(eh->h_proto); - else return 0; - } - break; - - case DLT_IPV4: - case DLT_RAW: - return ETH_P_IP; - - case DLT_NULL: - *data_offset = 4; - if (*(uint32 *)data == PF_INET) { - return ETH_P_IP; - } - else { - return -1; - } - break; - - default: - assert(!"unsupported link type"); - return -1; - } -} - static bool packet_build_payload(struct pcap_packet *packet) { struct vbuffer_sub sub; size_t data_offset; - if (get_protocol(packet, &data_offset) < 0) { + if (get_protocol(packet->link_type, &packet->data, &data_offset) < 0) { return false; } @@ -436,7 +352,7 @@ static int packet_do_receive(struct packet_module_state *state, struct packet ** for (i=0; ipd_count; ++i) { const int fd = pcap_get_selectable_fd(state->pd[i].pd); if (fd < 0) { - messagef(HAKA_LOG_ERROR, L"pcap", L"%s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, MODULE_NAME, "%s", errno_error(errno)); return 1; } else { 
@@ -454,7 +370,7 @@ static int packet_do_receive(struct packet_module_state *state, struct packet ** return 0; } else { - messagef(HAKA_LOG_ERROR, L"pcap", L"%s", errno_error(errno)); + messagef(HAKA_LOG_ERROR, MODULE_NAME, "%s", errno_error(errno)); return 1; } } @@ -481,7 +397,7 @@ static int packet_do_receive(struct packet_module_state *state, struct packet ** ret = pcap_next_ex(pd->pd, &header, &p); if (ret == -1) { - messagef(HAKA_LOG_ERROR, L"pcap", L"%s", pcap_geterr(pd->pd)); + messagef(HAKA_LOG_ERROR, MODULE_NAME, "%s", pcap_geterr(pd->pd)); return 1; } else if (ret == -2) { @@ -494,7 +410,7 @@ static int packet_do_receive(struct packet_module_state *state, struct packet ** } else if (header->caplen == 0 || header->len < header->caplen) { - messagef(HAKA_LOG_ERROR, L"pcap", L"skipping malformed packet %d", ++state->packet_id); + messagef(HAKA_LOG_ERROR, MODULE_NAME, "skipping malformed packet %llu", ++state->packet_id); return 0; } else { @@ -505,6 +421,10 @@ static int packet_do_receive(struct packet_module_state *state, struct packet ** memset(packet, 0, sizeof(struct pcap_packet)); + if (state->pin) { + pcap_dump((u_char *)state->pin, header, p); + } + list_init(packet); if (!vbuffer_create_from(&packet->data, (char *)p, header->caplen)) { @@ -524,7 +444,7 @@ static int packet_do_receive(struct packet_module_state *state, struct packet ** packet->timestamp.nsecs = header->ts.tv_usec*1000; if (packet->header.caplen < packet->header.len) - messagef(HAKA_LOG_WARNING, L"pcap", L"packet truncated"); + messagef(HAKA_LOG_WARNING, MODULE_NAME, "packet truncated"); if (pd->file) { const size_t cur = ftell(pd->file); @@ -539,7 +459,7 @@ static int packet_do_receive(struct packet_module_state *state, struct packet ** { pd->last_progress = time; if (percent > 0) { - messagef(HAKA_LOG_INFO, L"pcap", L"progress %.2f %%", percent); + messagef(HAKA_LOG_INFO, MODULE_NAME, "progress %.2f %%", percent); } } } @@ -549,6 +469,7 @@ static int packet_do_receive(struct 
packet_module_state *state, struct packet ** } if (!packet_build_payload(packet)) { + messagef(HAKA_LOG_ERROR, MODULE_NAME, "malformed packet %llu", packet->id); vbuffer_release(&packet->data); free(packet); return ENOMEM; @@ -569,7 +490,7 @@ static void packet_verdict(struct packet *orig_pkt, filter_result result) if (vbuffer_isvalid(&pkt->data)) { vbuffer_restore(&pkt->select, &pkt->core_packet.payload, false); - if (pkt->state->pf && result == FILTER_ACCEPT) { + if (pkt->state->pout && result == FILTER_ACCEPT) { const uint8 *data; size_t len; @@ -585,7 +506,7 @@ static void packet_verdict(struct packet *orig_pkt, filter_result result) pkt->header.caplen = len; } - pcap_dump((u_char *)pkt->state->pf, &(pkt->header), data); + pcap_dump((u_char *)pkt->state->pout, &(pkt->header), data); } vbuffer_clear(&pkt->data); @@ -596,10 +517,13 @@ static const char *packet_get_dissector(struct packet *orig_pkt) { struct pcap_packet *pkt = (struct pcap_packet*)orig_pkt; size_t data_offset; - switch (get_protocol(pkt, &data_offset)) { + switch (get_protocol(pkt->link_type, &pkt->data, &data_offset)) { case ETH_P_IP: return "ipv4"; + case -1: + messagef(HAKA_LOG_ERROR, MODULE_NAME, "malformed packet %llu", pkt->id); + default: return NULL; } @@ -646,7 +570,7 @@ static struct packet *new_packet(struct packet_module_state *state, size_t size) struct vbuffer_sub sub; struct pcap_packet *packet = malloc(sizeof(struct pcap_packet)); if (!packet) { - error(L"Memory error"); + error("Memory error"); return NULL; } @@ -658,7 +582,7 @@ static struct packet *new_packet(struct packet_module_state *state, size_t size) packet->link_type = state->link_type; time_gettimestamp(&packet->timestamp); - data_offset = get_link_type_offset(packet); + data_offset = get_link_type_offset(packet->link_type); size += data_offset; if (!vbuffer_create_new(&packet->data, size, true)) { @@ -720,7 +644,7 @@ static bool send_packet(struct packet *orig_pkt) struct pcap_packet *pkt = (struct 
pcap_packet*)orig_pkt; if (passthrough) { - error(L"sending is not supported in pass-through"); + error("sending is not supported in pass-through"); return false; } @@ -748,12 +672,11 @@ static bool is_realtime() return input_is_iface; } - struct packet_module HAKA_MODULE = { module: { type: MODULE_PACKET, - name: L"Pcap Module", - description: L"Pcap packet module", + name: "Pcap Module", + description: "Pcap packet module", api_version: HAKA_API_VERSION, init: init, cleanup: cleanup diff --git a/modules/packet/pcap/pcap.c b/modules/packet/pcap/pcap.c new file mode 100644 index 00000000..ca87d518 --- /dev/null +++ b/modules/packet/pcap/pcap.c @@ -0,0 +1,94 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +int get_link_type_offset(int link_type) +{ + size_t size; + + switch (link_type) + { + case DLT_LINUX_SLL: size = sizeof(struct linux_sll_header); break; + case DLT_EN10MB: size = sizeof(struct ethhdr); break; + case DLT_IPV4: + case DLT_RAW: size = 0; break; + case DLT_NULL: size = 4; break; + + default: assert(!"unsupported link type"); return -1; + } + + return size; +} + +int get_protocol(int link_type, struct vbuffer *packet_buffer, size_t *data_offset) +{ + size_t len, size; + struct vbuffer_sub sub; + const uint8* data = NULL; + + size = get_link_type_offset(link_type); + *data_offset = size; + + if (size > 0) { + vbuffer_sub_create(&sub, packet_buffer, 0, size); + data = vbuffer_sub_flatten(&sub, &len); + + if (len < *data_offset) { + return -1; + } + + assert(packet_buffer); + } + + switch (link_type) + { + case DLT_LINUX_SLL: + { + struct linux_sll_header *eh = (struct linux_sll_header *)data; + if (eh) return ntohs(eh->protocol); + else return 0; + } + break; 
+ + case DLT_EN10MB: + { + struct ethhdr *eh = (struct ethhdr *)data; + if (eh) return ntohs(eh->h_proto); + else return 0; + } + break; + + case DLT_IPV4: + case DLT_RAW: + return ETH_P_IP; + + case DLT_NULL: + *data_offset = 4; + if (*(uint32 *)data == PF_INET) { + return ETH_P_IP; + } + else { + return -1; + } + break; + + default: + assert(!"unsupported link type"); + return -1; + } +} diff --git a/modules/protocol/ipv4/cnx.c b/modules/protocol/ipv4/cnx.c index 5facdc09..d4dd73c2 100644 --- a/modules/protocol/ipv4/cnx.c +++ b/modules/protocol/ipv4/cnx.c @@ -39,7 +39,7 @@ struct cnx_table *cnx_table_new(void (*cnx_release)(struct cnx *, bool)) { struct cnx_table *table = malloc(sizeof(struct cnx_table)); if (!table) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -150,14 +150,14 @@ struct cnx *cnx_new(struct cnx_table *table, struct cnx_key *key) cnx_release(table, elem, true); } else { - error(L"cnx already exists"); + error("cnx already exists"); return NULL; } } elem = malloc(sizeof(struct cnx_table_elem)); if (!elem) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -181,7 +181,7 @@ struct cnx *cnx_new(struct cnx_table *table, struct cnx_key *key) ipv4_addr_to_string(elem->cnx.key.srcip, srcip, IPV4_ADDR_STRING_MAXLEN+1); ipv4_addr_to_string(elem->cnx.key.dstip, dstip, IPV4_ADDR_STRING_MAXLEN+1); - messagef(HAKA_LOG_DEBUG, L"cnx", L"opening connection %s:%u -> %s:%u", + messagef(HAKA_LOG_DEBUG, "cnx", "opening connection %s:%u -> %s:%u", srcip, elem->cnx.key.srcport, dstip, elem->cnx.key.dstport); } @@ -258,7 +258,7 @@ void cnx_close(struct cnx* cnx) ipv4_addr_to_string(elem->cnx.key.srcip, srcip, IPV4_ADDR_STRING_MAXLEN+1); ipv4_addr_to_string(elem->cnx.key.dstip, dstip, IPV4_ADDR_STRING_MAXLEN+1); - messagef(HAKA_LOG_DEBUG, L"cnx", L"closing connection %s:%u -> %s:%u", + messagef(HAKA_LOG_DEBUG, "cnx", "closing connection %s:%u -> %s:%u", srcip, elem->cnx.key.srcport, dstip, elem->cnx.key.dstport); } @@ -277,7 
+277,7 @@ void cnx_drop(struct cnx *cnx) ipv4_addr_to_string(elem->cnx.key.srcip, srcip, IPV4_ADDR_STRING_MAXLEN+1); ipv4_addr_to_string(elem->cnx.key.dstip, dstip, IPV4_ADDR_STRING_MAXLEN+1); - messagef(HAKA_LOG_DEBUG, L"cnx", L"dropping connection %s:%u -> %s:%u", + messagef(HAKA_LOG_DEBUG, "cnx", "dropping connection %s:%u -> %s:%u", srcip, elem->cnx.key.srcport, dstip, elem->cnx.key.dstport); } diff --git a/modules/protocol/ipv4/cnx.si b/modules/protocol/ipv4/cnx.si index a78fb0e4..d3b02b15 100644 --- a/modules/protocol/ipv4/cnx.si +++ b/modules/protocol/ipv4/cnx.si @@ -14,6 +14,7 @@ %} %include "haka/lua/cnx.si" +%include "haka/lua/ref.si" struct cnx_table { %extend { diff --git a/modules/protocol/ipv4/haka/ipv4-addr.h b/modules/protocol/ipv4/haka/ipv4-addr.h index 3be9955d..29fc9331 100644 --- a/modules/protocol/ipv4/haka/ipv4-addr.h +++ b/modules/protocol/ipv4/haka/ipv4-addr.h @@ -11,6 +11,7 @@ #define _HAKA_PROTO_IPV4_ADDR_H #include +#include /** diff --git a/modules/protocol/ipv4/haka/ipv4.h b/modules/protocol/ipv4/haka/ipv4.h index 0e89ad8f..2813ab46 100644 --- a/modules/protocol/ipv4/haka/ipv4.h +++ b/modules/protocol/ipv4/haka/ipv4.h @@ -29,7 +29,7 @@ #define IPV4_GET_BITS(type, v, r) GET_BITS(SWAP_FROM_BE(type, v), r) #define IPV4_SET_BITS(type, v, r, x) SWAP_TO_BE(type, SET_BITS(SWAP_FROM_BE(type, v), r, x)) -#define IPV4_CHECK(ip, ...) if (!(ip) || !(ip)->packet) { error(L"invalid ipv4 packet"); return __VA_ARGS__; } +#define IPV4_CHECK(ip, ...) 
if (!(ip) || !(ip)->packet) { error("invalid ipv4 packet"); return __VA_ARGS__; } #define IPV4_FLAG_RB 15 #define IPV4_FLAG_DF 15-1 diff --git a/modules/protocol/ipv4/haka/lua/ipv4-addr.si b/modules/protocol/ipv4/haka/lua/ipv4-addr.si index fd8ab77d..ee29c638 100644 --- a/modules/protocol/ipv4/haka/lua/ipv4-addr.si +++ b/modules/protocol/ipv4/haka/lua/ipv4-addr.si @@ -4,7 +4,12 @@ %{ -struct ipv4_addr; +#include + +struct ipv4_addr { + ipv4addr addr; +}; + struct ipv4_addr *ipv4_addr_new(ipv4addr a); %} diff --git a/modules/protocol/ipv4/ipv4-addr.c b/modules/protocol/ipv4/ipv4-addr.c index d97bf86c..db87a667 100644 --- a/modules/protocol/ipv4/ipv4-addr.c +++ b/modules/protocol/ipv4/ipv4-addr.c @@ -10,13 +10,12 @@ #include #include - uint32 ipv4_addr_from_string(const char *string) { struct in_addr addr; if (inet_pton(AF_INET, string, &addr) <= 0) { - error(L"invalid IPv4 address format"); + error("invalid IPv4 address format"); return 0; } diff --git a/modules/protocol/ipv4/ipv4-network.c b/modules/protocol/ipv4/ipv4-network.c index f8b458e5..529951ec 100644 --- a/modules/protocol/ipv4/ipv4-network.c +++ b/modules/protocol/ipv4/ipv4-network.c @@ -35,13 +35,13 @@ ipv4network ipv4_network_from_string(const char *string) { int8 * ptr; if (!(ptr = strchr(string, '/'))) { - error(L"Invalid IPv4 network address format"); + error("Invalid IPv4 network address format"); return ipv4_network_zero; } int32 slash_index = ptr - string; if (slash_index > IPV4_ADDR_STRING_MAXLEN) { - error(L"Invalid IPv4 network address format"); + error("Invalid IPv4 network address format"); return ipv4_network_zero; } @@ -57,7 +57,7 @@ ipv4network ipv4_network_from_string(const char *string) if ((sscanf(string + slash_index, "/%hhu", &netaddr.mask) != 1) || (netaddr.mask > 32 || netaddr.mask < 0)) { - error(L"Invalid IPv4 network address format"); + error("Invalid IPv4 network address format"); return ipv4_network_zero; } @@ -66,7 +66,7 @@ ipv4network ipv4_network_from_string(const char 
*string) if (maskedaddr != netaddr.net) { netaddr.net = maskedaddr; - message(HAKA_LOG_WARNING, L"ipv4" , L"Incorrect network mask"); + message(HAKA_LOG_WARNING, "ipv4" , "Incorrect network mask"); } return netaddr; diff --git a/modules/protocol/ipv4/ipv4.c b/modules/protocol/ipv4/ipv4.c index 48d713ff..af4297a5 100644 --- a/modules/protocol/ipv4/ipv4.c +++ b/modules/protocol/ipv4/ipv4.c @@ -41,7 +41,7 @@ static struct ipv4_frag_table *ipv4_frag_table_new() { struct ipv4_frag_table *table = malloc(sizeof(struct ipv4_frag_table)); if (!table) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -58,7 +58,7 @@ static struct ipv4_frag_table *ipv4_frag_table_new() return table; } -static void raise_alert(struct ipv4 *ip, wchar_t *message) +static void raise_alert(struct ipv4 *ip, char *message) { if (ip) { ALERT(invalid_packet, 1, 1) @@ -66,8 +66,8 @@ static void raise_alert(struct ipv4 *ip, wchar_t *message) severity: HAKA_ALERT_LOW, ENDALERT - TOWSTR(srcip, ipv4addr, ipv4_get_src(ip)); - TOWSTR(dstip, ipv4addr, ipv4_get_dst(ip)); + TOSTR(srcip, ipv4addr, ipv4_get_src(ip)); + TOSTR(dstip, ipv4addr, ipv4_get_dst(ip)); ALERT_NODE(invalid_packet, sources, 0, HAKA_ALERT_NODE_ADDRESS, srcip); ALERT_NODE(invalid_packet, targets, 0, HAKA_ALERT_NODE_ADDRESS, dstip); @@ -166,7 +166,7 @@ static bool ipv4_frag_insert(struct ipv4_frag_elem *elem, struct ipv4 *pkt) cur = list2_get(iter, struct ipv4, frag_list); iter = list2_erase(iter); - raise_alert(cur, L"invalid ipv4 fragment"); + raise_alert(cur, "invalid ipv4 fragment"); ipv4_action_drop(cur); ipv4_release(cur); } @@ -181,7 +181,7 @@ static bool ipv4_frag_insert(struct ipv4_frag_elem *elem, struct ipv4 *pkt) * packet does have the mf flag set. 
*/ struct ipv4 *last = list2_get(list2_prev(end), struct ipv4, frag_list); if (!ipv4_get_flags_mf(last)) { - raise_alert(pkt, L"invalid ipv4 fragment"); + raise_alert(pkt, "invalid ipv4 fragment"); ipv4_action_drop(pkt); ipv4_release(pkt); return false; @@ -217,7 +217,7 @@ static struct ipv4_frag_elem *ipv4_frag_table_insert(struct ipv4_frag_table *tab else { ptr = malloc(sizeof(struct ipv4_frag_elem)); if (!ptr) { - error(L"memory error"); + error("memory error"); mutex_unlock(&table->mutex); return false; } @@ -304,7 +304,7 @@ struct ipv4 *ipv4_dissect(struct packet *packet) } if (!vbuffer_check_size(payload, sizeof(struct ipv4_header), NULL)) { - raise_alert(NULL, L"corrupted ip packet, size is too small"); + raise_alert(NULL, "corrupted ip packet, size is too small"); packet_drop(packet); packet_release(packet); @@ -313,7 +313,7 @@ struct ipv4 *ipv4_dissect(struct packet *packet) ip = malloc(sizeof(struct ipv4)); if (!ip) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -328,7 +328,7 @@ struct ipv4 *ipv4_dissect(struct packet *packet) header_len = hdrlen.hdr_len << IPV4_HDR_LEN_OFFSET; if (header_len < sizeof(struct ipv4_header)) { - raise_alert(NULL, L"corrupted ip packet"); + raise_alert(NULL, "corrupted ip packet"); packet_drop(packet); packet_release(packet); @@ -346,7 +346,7 @@ struct ipv4 *ipv4_dissect(struct packet *packet) * as the packet might contains some padding. 
*/ if (vbuffer_size(payload) < ipv4_get_len(ip)) { - raise_alert(ip, L"invalid ip packet, invalid size is too small"); + raise_alert(ip, "invalid ip packet, invalid size is too small"); packet_drop(packet); packet_release(packet); diff --git a/modules/protocol/ipv4/ipv4.i b/modules/protocol/ipv4/ipv4.i index fdc3ddfe..2104af73 100644 --- a/modules/protocol/ipv4/ipv4.i +++ b/modules/protocol/ipv4/ipv4.i @@ -4,6 +4,13 @@ %module ipv4 +%include "haka/lua/swig.si" +%include "haka/lua/object.si" +%include "haka/lua/packet.si" +%include "haka/lua/ref.si" +%include "haka/lua/ipv4.si" +%include "haka/lua/ipv4-addr.si" + %{ #include "haka/ipv4.h" #include "haka/lua/state.h" @@ -11,10 +18,6 @@ struct ipv4_flags; struct ipv4_payload; - struct ipv4_addr { - ipv4addr addr; - }; - struct ipv4_network { ipv4network net; }; @@ -48,13 +51,6 @@ } %} -%include "haka/lua/swig.si" -%include "haka/lua/object.si" -%include "haka/lua/packet.si" -%include "haka/lua/ref.si" -%include "haka/lua/ipv4.si" -%include "haka/lua/ipv4-addr.si" - %rename(addr) ipv4_addr; struct ipv4_addr { @@ -63,7 +59,7 @@ struct ipv4_addr { struct ipv4_addr *ret; if (!str) { - error(L"invalid parameter"); + error("invalid parameter"); return NULL; } @@ -87,7 +83,7 @@ struct ipv4_addr { ipv4_addr(unsigned int a, unsigned int b, unsigned int c, unsigned int d) { if (a > 255 || b > 255 || c > 255 || d > 255) { - error(L"invalid IPv4 address format"); + error("invalid IPv4 address format"); return NULL; } @@ -129,7 +125,7 @@ struct ipv4_addr { { *TEMP_OUTPUT = malloc(IPV4_ADDR_STRING_MAXLEN + 1); if (!*TEMP_OUTPUT) { - error(L"memory error"); + error("memory error"); return; } @@ -146,7 +142,7 @@ struct checksum_partial { checksum_partial() { struct checksum_partial *ret = malloc(sizeof(struct checksum_partial)); if (!ret) { - error(L"memory error"); + error("memory error"); return NULL; } *ret = checksum_partial_init; @@ -190,7 +186,7 @@ struct ipv4_network { struct ipv4_network *ret; if (!str) { - error(L"invalid 
parameter"); + error("invalid parameter"); return NULL; } @@ -210,7 +206,7 @@ struct ipv4_network { ipv4_network(struct ipv4_addr addr, unsigned char mask) { if (mask < 0 || mask > 32) { - error(L"Invalid IPv4 addresss network format"); + error("Invalid IPv4 addresss network format"); return NULL; } @@ -232,7 +228,7 @@ struct ipv4_network { { *TEMP_OUTPUT = malloc(IPV4_NETWORK_STRING_MAXLEN + 1); if (!*TEMP_OUTPUT) { - error(L"memory error"); + error("memory error"); return; } @@ -244,7 +240,7 @@ struct ipv4_network { bool _contains(struct ipv4_addr *addr) { if (!addr) { - error(L"nil argument"); + error("nil argument"); return false; } return ipv4_network_contains($self->net, addr->addr); diff --git a/modules/protocol/ipv4/main.c b/modules/protocol/ipv4/main.c index 46efdab7..4a2dd9db 100644 --- a/modules/protocol/ipv4/main.c +++ b/modules/protocol/ipv4/main.c @@ -16,8 +16,8 @@ static void cleanup() struct module HAKA_MODULE = { type: MODULE_EXTENSION, - name: L"IPv4", - description: L"IPv4 protocol", + name: "IPv4", + description: "IPv4 protocol", api_version: HAKA_API_VERSION, init: init, cleanup: cleanup diff --git a/modules/protocol/raw/main.c b/modules/protocol/raw/main.c index c44336fa..54af99f9 100644 --- a/modules/protocol/raw/main.c +++ b/modules/protocol/raw/main.c @@ -16,8 +16,8 @@ static void cleanup() struct module HAKA_MODULE = { type: MODULE_EXTENSION, - name: L"Raw", - description: L"Raw packet protocol", + name: "Raw", + description: "Raw packet protocol", api_version: HAKA_API_VERSION, init: init, cleanup: cleanup diff --git a/modules/protocol/raw/test/basic-ref.txt b/modules/protocol/raw/test/basic-ref.txt index 622e0922..354200e0 100644 --- a/modules/protocol/raw/test/basic-ref.txt +++ b/modules/protocol/raw/test/basic-ref.txt @@ -1,4 +1,6 @@ userdata packet { + data : table { + } id : 1 payload : userdata vbuffer { modified : false @@ -6,6 +8,8 @@ userdata packet { timestamp : userdata time Mon Nov 4 15:49:27 2013 } userdata packet { + data : 
table { + } id : 2 payload : userdata vbuffer { modified : false @@ -13,6 +17,8 @@ userdata packet { timestamp : userdata time Mon Nov 4 15:49:27 2013 } userdata packet { + data : table { + } id : 3 payload : userdata vbuffer { modified : false @@ -20,6 +26,8 @@ userdata packet { timestamp : userdata time Mon Nov 4 15:49:28 2013 } userdata packet { + data : table { + } id : 4 payload : userdata vbuffer { modified : false @@ -27,6 +35,8 @@ userdata packet { timestamp : userdata time Mon Nov 4 15:49:28 2013 } userdata packet { + data : table { + } id : 5 payload : userdata vbuffer { modified : false @@ -34,6 +44,8 @@ userdata packet { timestamp : userdata time Mon Nov 4 15:49:31 2013 } userdata packet { + data : table { + } id : 6 payload : userdata vbuffer { modified : false @@ -41,6 +53,8 @@ userdata packet { timestamp : userdata time Mon Nov 4 15:49:31 2013 } userdata packet { + data : table { + } id : 7 payload : userdata vbuffer { modified : false @@ -48,6 +62,8 @@ userdata packet { timestamp : userdata time Mon Nov 4 15:49:36 2013 } userdata packet { + data : table { + } id : 8 payload : userdata vbuffer { modified : false @@ -55,6 +71,8 @@ userdata packet { timestamp : userdata time Mon Nov 4 15:49:36 2013 } userdata packet { + data : table { + } id : 9 payload : userdata vbuffer { modified : false @@ -62,6 +80,8 @@ userdata packet { timestamp : userdata time Mon Nov 4 15:49:46 2013 } userdata packet { + data : table { + } id : 10 payload : userdata vbuffer { modified : false @@ -69,6 +89,8 @@ userdata packet { timestamp : userdata time Mon Nov 4 15:49:46 2013 } userdata packet { + data : table { + } id : 11 payload : userdata vbuffer { modified : false @@ -76,6 +98,8 @@ userdata packet { timestamp : userdata time Mon Nov 4 15:49:46 2013 } userdata packet { + data : table { + } id : 12 payload : userdata vbuffer { modified : false @@ -83,6 +107,8 @@ userdata packet { timestamp : userdata time Mon Nov 4 15:49:46 2013 } userdata packet { + data : table { + } id 
: 13 payload : userdata vbuffer { modified : false @@ -90,6 +116,8 @@ userdata packet { timestamp : userdata time Mon Nov 4 15:49:50 2013 } userdata packet { + data : table { + } id : 14 payload : userdata vbuffer { modified : false @@ -97,6 +125,8 @@ userdata packet { timestamp : userdata time Mon Nov 4 15:49:50 2013 } userdata packet { + data : table { + } id : 15 payload : userdata vbuffer { modified : false @@ -104,6 +134,8 @@ userdata packet { timestamp : userdata time Mon Nov 4 15:49:51 2013 } userdata packet { + data : table { + } id : 16 payload : userdata vbuffer { modified : false @@ -111,6 +143,8 @@ userdata packet { timestamp : userdata time Mon Nov 4 15:49:51 2013 } userdata packet { + data : table { + } id : 17 payload : userdata vbuffer { modified : false @@ -118,6 +152,8 @@ userdata packet { timestamp : userdata time Mon Nov 4 15:49:51 2013 } userdata packet { + data : table { + } id : 18 payload : userdata vbuffer { modified : false diff --git a/modules/protocol/tcp/doc/tcp_connection.rst b/modules/protocol/tcp/doc/tcp_connection.rst index ddf2eb57..015076b9 100644 --- a/modules/protocol/tcp/doc/tcp_connection.rst +++ b/modules/protocol/tcp/doc/tcp_connection.rst @@ -94,7 +94,6 @@ Events :module: :objtype: event - **Event options:** .. haka:data:: streamed @@ -129,6 +128,20 @@ Events end } +.. haka:function:: tcp_connection.events.receive_packet(flow, packet, direction) + :module: + :objtype: event + + :param flow: TCP flow. + :paramtype flow: :haka:class:`TcpConnectionDissector` + :param packet: Current receive TCP packet. + :ptype packet: :haka:class:`TcpDissector` + :param direction: Data direction (``'up'`` or ``'down'``). + :paramtype direction: string + + Event triggered when a packet associated with the stream is received. 
+ + Helper ------ diff --git a/modules/protocol/tcp/haka/tcp.h b/modules/protocol/tcp/haka/tcp.h index 1b87e931..7f875cf2 100644 --- a/modules/protocol/tcp/haka/tcp.h +++ b/modules/protocol/tcp/haka/tcp.h @@ -17,7 +17,7 @@ #define TCP_GET_BITS(type, v, r) GET_BITS(SWAP_FROM_BE(type, v), r) #define TCP_SET_BITS(type, v, r, x) SWAP_TO_BE(type, SET_BITS(SWAP_FROM_BE(type, v), r, x)) -#define TCP_CHECK(tcp, ...) if (!(tcp) || !(tcp)->packet) { error(L"invalid tcp packet"); return __VA_ARGS__; } +#define TCP_CHECK(tcp, ...) if (!(tcp) || !(tcp)->packet) { error("invalid tcp packet"); return __VA_ARGS__; } #define TCP_FLAGS_BITS 0, 8 #define TCP_FLAGS_START 13 diff --git a/modules/protocol/tcp/main.c b/modules/protocol/tcp/main.c index 941178dd..a388cc9e 100644 --- a/modules/protocol/tcp/main.c +++ b/modules/protocol/tcp/main.c @@ -16,8 +16,8 @@ static void cleanup() struct module HAKA_MODULE = { type: MODULE_EXTENSION, - name: L"TCP", - description: L"TCP protocol", + name: "TCP", + description: "TCP protocol", api_version: HAKA_API_VERSION, init: init, cleanup: cleanup diff --git a/modules/protocol/tcp/tcp-stream.c b/modules/protocol/tcp/tcp-stream.c index b64cc175..97898fc6 100644 --- a/modules/protocol/tcp/tcp-stream.c +++ b/modules/protocol/tcp/tcp-stream.c @@ -110,13 +110,13 @@ bool tcp_stream_push(struct tcp_stream *stream, struct tcp *tcp, struct vbuffer_ uint64 ref_seq; if (stream->start_seq == (size_t)-1) { - error(L"uninitialized stream"); + error("uninitialized stream"); return false; } chunk = malloc(sizeof(struct tcp_stream_chunk)); if (!chunk) { - error(L"memory error"); + error("memory error"); return false; } @@ -174,7 +174,7 @@ bool tcp_stream_push(struct tcp_stream *stream, struct tcp *tcp, struct vbuffer_ } if (iter != end && chunk->end_seq > qchunk->start_seq) { - message(HAKA_LOG_WARNING, L"tcp_connection", L"retransmit packet (ignored)"); + message(HAKA_LOG_WARNING, "tcp_connection", "retransmit packet (ignored)"); tcp_stream_chunk_free(chunk); 
return false; } @@ -182,7 +182,7 @@ bool tcp_stream_push(struct tcp_stream *stream, struct tcp *tcp, struct vbuffer_ list2_insert(iter, &chunk->list); } else { - message(HAKA_LOG_WARNING, L"tcp_connection", L"retransmit packet (ignored)"); + message(HAKA_LOG_WARNING, "tcp_connection", "retransmit packet (ignored)"); tcp_stream_chunk_free(chunk); return false; } diff --git a/modules/protocol/tcp/tcp.c b/modules/protocol/tcp/tcp.c index f09aa0ef..0bd4bf03 100644 --- a/modules/protocol/tcp/tcp.c +++ b/modules/protocol/tcp/tcp.c @@ -23,10 +23,10 @@ struct tcp_pseudo_header { uint16 len; }; -static void alert_invalid_packet(struct ipv4 *packet, wchar_t *desc) +static void alert_invalid_packet(struct ipv4 *packet, char *desc) { - TOWSTR(srcip, ipv4addr, ipv4_get_src(packet)); - TOWSTR(dstip, ipv4addr, ipv4_get_dst(packet)); + TOSTR(srcip, ipv4addr, ipv4_get_src(packet)); + TOSTR(dstip, ipv4addr, ipv4_get_dst(packet)); ALERT(invalid_packet, 1, 1) description: desc, severity: HAKA_ALERT_LOW, @@ -87,21 +87,21 @@ struct tcp *tcp_dissect(struct ipv4 *packet) /* Not a TCP packet */ if (ipv4_get_proto(packet) != TCP_PROTO) { - error(L"Not a tcp packet"); + error("Not a tcp packet"); return NULL; } assert(packet->payload); if (!vbuffer_check_size(packet->payload, sizeof(struct tcp_header), NULL)) { - alert_invalid_packet(packet, L"corrupted tcp packet, size is too small"); + alert_invalid_packet(packet, "corrupted tcp packet, size is too small"); ipv4_action_drop(packet); return NULL; } tcp = malloc(sizeof(struct tcp)); if (!tcp) { - error(L"Failed to allocate memory"); + error("Failed to allocate memory"); return NULL; } @@ -115,7 +115,7 @@ struct tcp *tcp_dissect(struct ipv4 *packet) hdrlen = _hdrlen.hdr_len << TCP_HDR_LEN; if (hdrlen < sizeof(struct tcp_header) || !vbuffer_check_size(packet->payload, hdrlen, NULL)) { - alert_invalid_packet(packet, L"corrupted tcp packet, header length is too small"); + alert_invalid_packet(packet, "corrupted tcp packet, header length is too 
small"); ipv4_action_drop(packet); free(tcp); return NULL; @@ -161,7 +161,7 @@ struct tcp *tcp_create(struct ipv4 *packet) struct tcp *tcp = malloc(sizeof(struct tcp)); if (!tcp) { - error(L"Failed to allocate memory"); + error("Failed to allocate memory"); return NULL; } diff --git a/modules/protocol/tcp/tcp.i b/modules/protocol/tcp/tcp.i index 58a73c30..2ccf944b 100644 --- a/modules/protocol/tcp/tcp.i +++ b/modules/protocol/tcp/tcp.i @@ -15,7 +15,6 @@ struct tcp_stream; %} -%include "haka/lua/ipv4-addr.si" %include "haka/lua/swig.si" %include "haka/lua/ref.si" %include "haka/lua/ipv4.si" @@ -56,7 +55,7 @@ struct tcp_stream { struct tcp_stream *stream = malloc(sizeof(struct tcp_stream)); if (!stream) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -89,7 +88,7 @@ struct tcp_stream struct vbuffer_iterator *iter = malloc(sizeof(struct vbuffer_iterator)); if (!iter) { free(iter); - error(L"memory error"); + error("memory error"); return NULL; } diff --git a/modules/protocol/tcp/tcp_connection.lua b/modules/protocol/tcp/tcp_connection.lua index 3d9c3525..fa40030c 100644 --- a/modules/protocol/tcp/tcp_connection.lua +++ b/modules/protocol/tcp/tcp_connection.lua @@ -19,6 +19,7 @@ local tcp_connection_dissector = haka.dissector.new{ tcp_connection_dissector.cnx_table = ipv4.cnx_table() tcp_connection_dissector:register_event('new_connection') +tcp_connection_dissector:register_event('receive_packet') tcp_connection_dissector:register_streamed_event('receive_data') tcp_connection_dissector:register_event('end_connection') @@ -479,6 +480,7 @@ end function tcp_connection_dissector.method:emit(pkt, direction) self.connection:update_stat(direction, pkt.ip.len) + self:trigger('receive_packet', pkt, direction) self.state:update(direction, pkt) end diff --git a/modules/protocol/udp/doc/udp_connection.rst b/modules/protocol/udp/doc/udp_connection.rst index cb0c049f..eec750ce 100644 --- a/modules/protocol/udp/doc/udp_connection.rst +++ 
b/modules/protocol/udp/doc/udp_connection.rst @@ -85,6 +85,19 @@ Events Event triggered when some data are available on a UDP connection. +.. haka:function:: udp_connection.events.receive_packet(flow, packet, direction) + :module: + :objtype: event + + :param flow: UDP flow. + :paramtype flow: :haka:class:`UdpConnectionDissector` + :param packet: Current receive UDP packet. + :ptype packet: :haka:class:`UdpDissector` + :param direction: Data direction (``'up'`` or ``'down'``). + :paramtype direction: string + + Event triggered when a packet associated with the stream is received. + Helper ------ diff --git a/modules/protocol/udp/udp_connection.lua b/modules/protocol/udp/udp_connection.lua index 0a670fa7..e61087c3 100644 --- a/modules/protocol/udp/udp_connection.lua +++ b/modules/protocol/udp/udp_connection.lua @@ -18,6 +18,7 @@ local udp_connection_dissector = haka.dissector.new{ udp_connection_dissector.cnx_table = ipv4.cnx_table() udp_connection_dissector:register_event('new_connection') +udp_connection_dissector:register_event('receive_packet') udp_connection_dissector:register_event('receive_data') udp_connection_dissector:register_event('end_connection') @@ -158,6 +159,7 @@ end function udp_connection_dissector.method:emit(direction, pkt) self.connection:update_stat(direction, pkt.ip.len) + self:trigger('receive_packet', pkt, direction) self.state:update(direction, pkt) end diff --git a/modules/regexp/pcre/CMakeLists.txt b/modules/regexp/pcre/CMakeLists.txt index d8dd0131..5821d537 100644 --- a/modules/regexp/pcre/CMakeLists.txt +++ b/modules/regexp/pcre/CMakeLists.txt @@ -17,4 +17,6 @@ if(PCRE_FOUND) # Tests add_subdirectory(test) +else() + message(STATUS "Not building module pcre (missing libraries)") endif() diff --git a/modules/regexp/pcre/main.c b/modules/regexp/pcre/main.c index 245d0dfe..52d1e78d 100644 --- a/modules/regexp/pcre/main.c +++ b/modules/regexp/pcre/main.c @@ -11,7 +11,7 @@ #include #include -#define LOG_MODULE L"pcre" +#define LOG_MODULE 
"pcre" /* We enforce multiline on all API */ #define DEFAULT_COMPILE_OPTIONS PCRE_MULTILINE @@ -28,7 +28,7 @@ #define CHECK_REGEXP_TYPE(re)\ do {\ if (re == NULL || re->super.module != &HAKA_MODULE) {\ - error(L"Wrong regexp struct passed to PCRE module");\ + error("Wrong regexp struct passed to PCRE module");\ goto type_error;\ }\ } while(0) @@ -36,7 +36,7 @@ #define CHECK_REGEXP_SINK_TYPE(sink)\ do {\ if (sink == NULL || sink->super.regexp->module != &HAKA_MODULE) {\ - error(L"Wrong regexp_sink struct passed to PCRE module");\ + error("Wrong regexp_sink struct passed to PCRE module");\ goto type_error;\ }\ } while(0) @@ -82,8 +82,8 @@ static int _vbpartial_exec(struct regexp_sink_pcre *sink, s struct regexp_module HAKA_MODULE = { module: { type: MODULE_REGEXP, - name: L"PCRE regexp engine", - description: L"PCRE regexp engine", + name: "PCRE regexp engine", + description: "PCRE regexp engine", api_version: HAKA_API_VERSION, init: init, cleanup: cleanup @@ -159,7 +159,7 @@ static struct regexp *compile(const char *pattern, int options) re = malloc(sizeof(struct regexp_pcre)); if (!re) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -178,7 +178,7 @@ static struct regexp *compile(const char *pattern, int options) error: free(re); - error(L"PCRE compilation failed with error '%s' at offset %d", errorstr, erroffset); + error("PCRE compilation failed with error '%s' at offset %d", errorstr, erroffset); return NULL; } @@ -249,7 +249,7 @@ static struct regexp_sink_pcre *_create_sink(struct regexp *_re) sink = malloc(sizeof(struct regexp_sink_pcre)); if (!sink) { - error(L"memory error"); + error("memory error"); goto error; } @@ -260,7 +260,7 @@ static struct regexp_sink_pcre *_create_sink(struct regexp *_re) sink->wscount = re->wscount_max; sink->workspace = calloc(sink->wscount, sizeof(int)); if (!sink->workspace) { - error(L"memory error"); + error("memory error"); goto error; } @@ -306,10 +306,10 @@ static bool workspace_grow(struct 
regexp_sink_pcre *sink) sink->wscount *= 2; - messagef(HAKA_LOG_DEBUG, LOG_MODULE, L"growing PCRE workspace to %d int", sink->wscount); + messagef(HAKA_LOG_DEBUG, LOG_MODULE, "growing PCRE workspace to %d int", sink->wscount); if (sink->wscount > WSCOUNT_MAX) { - error(L"PCRE workspace too big, max allowed size is %d int", WSCOUNT_MAX); + error("PCRE workspace too big, max allowed size is %d int", WSCOUNT_MAX); return false; } @@ -327,7 +327,7 @@ static bool workspace_grow(struct regexp_sink_pcre *sink) sink->workspace = realloc(sink->workspace, sink->wscount*sizeof(int)); if (!sink->workspace) { - error(L"memory error"); + error("memory error"); return false; } @@ -388,7 +388,7 @@ static int _exec(struct regexp *_re, const char *buf, int len, struct regexp_res case PCRE_ERROR_NOMATCH: return REGEXP_NOMATCH; default: - error(L"PCRE internal error %d", ret); + error("PCRE internal error %d", ret); return REGEXP_ERROR; } @@ -407,7 +407,7 @@ static int _partial_exec(struct regexp_sink_pcre *sink, const char *buf, int len assert(buf); if (sink->workspace == NULL) { - error(L"Invalid sink. NULL workspace"); + error("Invalid sink. NULL workspace"); goto error; } @@ -491,7 +491,7 @@ static int _partial_exec(struct regexp_sink_pcre *sink, const char *buf, int len return sink->super.match; default: sink->super.match = REGEXP_ERROR; - error(L"PCRE internal error %d", ret); + error("PCRE internal error %d", ret); return sink->super.match; } diff --git a/sample/CMakeLists.txt b/sample/CMakeLists.txt index 6a71c88b..11d86c2c 100644 --- a/sample/CMakeLists.txt +++ b/sample/CMakeLists.txt @@ -3,8 +3,8 @@ # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
install(FILES empty.lua DESTINATION share/haka/sample) -install(DIRECTORY filter gettingstarted hellopacket ruleset sqli stats smtp_dissector mymodule - DESTINATION share/haka/sample +install(DIRECTORY filter gettingstarted hellopacket ruleset sqli stats + smtp_dissector mymodule DESTINATION share/haka/sample PATTERN "*.in" EXCLUDE) configure_file(mymodule/Makefile.in ${CMAKE_CURRENT_SOURCE_DIR}/mymodule/Makefile @ONLY) diff --git a/sample/mymodule/src/mymodule.c b/sample/mymodule/src/mymodule.c index bf66b373..cbc02ca3 100644 --- a/sample/mymodule/src/mymodule.c +++ b/sample/mymodule/src/mymodule.c @@ -18,8 +18,8 @@ static void cleanup(); struct module MY_MODULE = { type: MODULE_EXTENSION, - name: L"my module", - description: L"my module", + name: "my module", + description: "my module", api_version: HAKA_API_VERSION, init: init, cleanup: cleanup @@ -28,16 +28,16 @@ struct module MY_MODULE = { static int init(struct parameters *args) { - messagef(HAKA_LOG_INFO, L"mymodule", L"init my module"); + messagef(HAKA_LOG_INFO, "mymodule", "init my module"); return 0; } static void cleanup() { - messagef(HAKA_LOG_INFO, L"mymodule", L"cleanup my module"); + messagef(HAKA_LOG_INFO, "mymodule", "cleanup my module"); } void myfunc(void) { - messagef(HAKA_LOG_INFO, L"mymodule", L"myfunc"); + messagef(HAKA_LOG_INFO, "mymodule", "myfunc"); } diff --git a/src/haka/app.c b/src/haka/app.c index 7b6ae126..6bd036fb 100644 --- a/src/haka/app.c +++ b/src/haka/app.c @@ -99,7 +99,7 @@ void initialize() if (sigaction(SIGTERM, &sa, NULL) || sigaction(SIGINT, &sa, NULL) || sigaction(SIGQUIT, &sa, NULL)) { - messagef(HAKA_LOG_FATAL, L"core", L"%s", errno_error(errno)); + messagef(HAKA_LOG_FATAL, "core", "%s", errno_error(errno)); clean_exit(); exit(1); } @@ -108,7 +108,7 @@ void initialize() signal(SIGHUP, handle_sighup); if (!module_set_default_path()) { - fprintf(stderr, "%ls\n", clear_error()); + fprintf(stderr, "%s\n", clear_error()); clean_exit(); exit(1); } @@ -127,11 +127,11 @@ void 
prepare(int threadcount, bool attach_debugger, bool dissector_graph) } if (packet_module->pass_through()) { - messagef(HAKA_LOG_INFO, L"core", L"setting packet mode to pass-through\n"); + messagef(HAKA_LOG_INFO, "core", "setting packet mode to pass-through\n"); packet_set_mode(MODE_PASSTHROUGH); } - messagef(HAKA_LOG_INFO, L"core", L"loading rule file '%s'", configuration_file); + messagef(HAKA_LOG_INFO, "core", "loading rule file '%s'", configuration_file); /* Add module path to the configuration folder */ { @@ -145,7 +145,7 @@ void prepare(int threadcount, bool attach_debugger, bool dissector_graph) module_add_path(module_path, false); if (check_error()) { - message(HAKA_LOG_FATAL, L"core", clear_error()); + message(HAKA_LOG_FATAL, "core", clear_error()); free(module_path); clean_exit(); exit(1); @@ -159,16 +159,16 @@ void prepare(int threadcount, bool attach_debugger, bool dissector_graph) attach_debugger, dissector_graph); if (!thread_states) { assert(check_error()); - message(HAKA_LOG_FATAL, L"core", clear_error()); + message(HAKA_LOG_FATAL, "core", clear_error()); clean_exit(); exit(1); } if (threadcount > 1) { - messagef(HAKA_LOG_INFO, L"core", L"starting multi-threaded processing on %i threads\n", threadcount); + messagef(HAKA_LOG_INFO, "core", "starting multi-threaded processing on %i threads\n", threadcount); } else { - message(HAKA_LOG_INFO, L"core", L"starting single threaded processing\n"); + message(HAKA_LOG_INFO, "core", "starting single threaded processing\n"); } } @@ -176,7 +176,7 @@ void start() { thread_pool_start(thread_states); if (check_error()) { - message(HAKA_LOG_FATAL, L"core", clear_error()); + message(HAKA_LOG_FATAL, "core", clear_error()); clean_exit(); exit(1); } @@ -220,7 +220,7 @@ bool setup_loglevel(char *level) { while (true) { char *value; - wchar_t *module=NULL; + char *module=NULL; log_level loglevel; char *next_level = strchr(level, ','); @@ -234,16 +234,7 @@ bool setup_loglevel(char *level) *value = '\0'; ++value; - module = 
malloc(sizeof(wchar_t)*(strlen(level)+1)); - if (!module) { - error(L"memory error"); - return false; - } - - if (mbstowcs(module, level, strlen(level)+1) == -1) { - error(L"invalid module string"); - return false; - } + module = level; } else { value = level; @@ -256,8 +247,6 @@ bool setup_loglevel(char *level) setlevel(loglevel, module); - if (module) free(module); - if (next_level) level = next_level; else break; } diff --git a/src/haka/ctl.c b/src/haka/ctl.c index 9b171fb6..cf2fd273 100644 --- a/src/haka/ctl.c +++ b/src/haka/ctl.c @@ -28,7 +28,7 @@ #include "ctl_comm.h" -#define MODULE L"ctl" +#define MODULE "ctl" #define MAX_CLIENT_QUEUE 10 #define MAX_COMMAND_LEN 1024 @@ -45,6 +45,7 @@ struct ctl_client_state { }; struct ctl_server_state { + char *socket_file; int fd; thread_t thread; bool thread_created:1; @@ -121,7 +122,7 @@ UNUSED static bool ctl_start_client_thread(struct ctl_client_state *state, void state->data = data; if (!thread_create(&state->thread, ctl_client_process_thread, state)) { - messagef(HAKA_LOG_DEBUG, MODULE, L"failed to create thread: %ls", clear_error(errno)); + messagef(HAKA_LOG_DEBUG, MODULE, "failed to create thread: %s", clear_error(errno)); return false; } @@ -134,9 +135,9 @@ static enum clt_client_rc ctl_client_process(struct ctl_client_state *state) char *command = ctl_recv_chars(state->fd, NULL); if (!command) { - const wchar_t *error = clear_error(); - if (wcscmp(error, L"end of file") != 0) { - messagef(HAKA_LOG_ERROR, MODULE, L"cannot read from ctl socket: %ls", error); + const char *error = clear_error(); + if (strcmp(error, "end of file") != 0) { + messagef(HAKA_LOG_ERROR, MODULE, "cannot read from ctl socket: %s", error); } return CTL_CLIENT_DONE; } @@ -195,8 +196,8 @@ static void ctl_server_cleanup(struct ctl_server_state *state, bool cancel_threa } if (state->binded) { - if (remove(HAKA_CTL_SOCKET_FILE)) { - messagef(HAKA_LOG_ERROR, MODULE, L"cannot remove socket file: %s", errno_error(errno)); + if 
(remove(state->socket_file)) { + messagef(HAKA_LOG_ERROR, MODULE, "cannot remove socket file: %s", errno_error(errno)); } state->binded = false; @@ -208,32 +209,38 @@ static void ctl_server_cleanup(struct ctl_server_state *state, bool cancel_threa } } -static bool ctl_server_init(struct ctl_server_state *state) +static bool ctl_server_init(struct ctl_server_state *state, const char *socket_file) { struct sockaddr_un addr; socklen_t len; int err; + state->socket_file = strdup(socket_file); + if (!state->socket_file) { + messagef(HAKA_LOG_FATAL, MODULE, "memory error"); + return false; + } + mutex_init(&state->lock, true); list2_init(&state->clients); /* Create the socket */ if ((state->fd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) { - messagef(HAKA_LOG_FATAL, MODULE, L"cannot create ctl server socket: %s", errno_error(errno)); + messagef(HAKA_LOG_FATAL, MODULE, "cannot create ctl server socket: %s", errno_error(errno)); return false; } bzero((char *)&addr, sizeof(addr)); addr.sun_family = AF_UNIX; - strcpy(addr.sun_path, HAKA_CTL_SOCKET_FILE); + strcpy(addr.sun_path, state->socket_file); len = strlen(addr.sun_path) + sizeof(addr.sun_family); err = bind(state->fd, (struct sockaddr *)&addr, len); if (err && errno == EADDRINUSE) { - if (unlink(HAKA_CTL_SOCKET_FILE)) { - messagef(HAKA_LOG_FATAL, MODULE, L"cannot remove ctl server socket: %s", errno_error(errno)); + if (unlink(state->socket_file)) { + messagef(HAKA_LOG_FATAL, MODULE, "cannot remove ctl server socket: %s", errno_error(errno)); ctl_server_cleanup(&ctl_server, true); return false; } @@ -242,7 +249,7 @@ static bool ctl_server_init(struct ctl_server_state *state) } if (err) { - messagef(HAKA_LOG_FATAL, MODULE, L"cannot bind ctl server socket: %s", errno_error(errno)); + messagef(HAKA_LOG_FATAL, MODULE, "cannot bind ctl server socket: %s", errno_error(errno)); ctl_server_cleanup(&ctl_server, true); return false; } @@ -264,7 +271,7 @@ static void ctl_server_accept(struct ctl_server_state *state, fd_set *listfds, i 
fd = accept(state->fd, (struct sockaddr *)&addr, &len); if (fd < 0) { - messagef(HAKA_LOG_DEBUG, MODULE, L"failed to accept ctl connection: %s", errno_error(errno)); + messagef(HAKA_LOG_DEBUG, MODULE, "failed to accept ctl connection: %s", errno_error(errno)); return; } @@ -273,7 +280,7 @@ static void ctl_server_accept(struct ctl_server_state *state, fd_set *listfds, i client = malloc(sizeof(struct ctl_client_state)); if (!client) { thread_setcancelstate(true); - message(HAKA_LOG_ERROR, MODULE, L"memmory error"); + message(HAKA_LOG_ERROR, MODULE, "memmory error"); return; } @@ -335,7 +342,7 @@ static void *ctl_server_coreloop(void *param) /* Block all signal to let the main thread handle them */ sigfillset(&set); if (!thread_sigmask(SIG_BLOCK, &set, NULL)) { - message(HAKA_LOG_FATAL, L"core", clear_error()); + message(HAKA_LOG_FATAL, "core", clear_error()); return NULL; } @@ -350,7 +357,7 @@ static void *ctl_server_coreloop(void *param) rc = select(maxfd+1, &readfds, NULL, NULL, NULL); if (rc < 0) { - messagef(HAKA_LOG_FATAL, MODULE, L"failed to handle ctl connection (closing ctl socket): %s", errno_error(errno)); + messagef(HAKA_LOG_FATAL, MODULE, "failed to handle ctl connection (closing ctl socket): %s", errno_error(errno)); break; } else if (rc > 0) { @@ -370,15 +377,15 @@ static void *ctl_server_coreloop(void *param) return NULL; } -bool prepare_ctl_server() +bool prepare_ctl_server(const char *ctl_socket_file) { - return ctl_server_init(&ctl_server); + return ctl_server_init(&ctl_server, ctl_socket_file); } bool start_ctl_server() { if (listen(ctl_server.fd, MAX_CLIENT_QUEUE)) { - messagef(HAKA_LOG_FATAL, MODULE, L"failed to listen on ctl socket: %s", errno_error(errno)); + messagef(HAKA_LOG_FATAL, MODULE, "failed to listen on ctl socket: %s", errno_error(errno)); ctl_server_cleanup(&ctl_server, true); return false; } @@ -404,14 +411,14 @@ void stop_ctl_server() */ int redirect_message(int fd, mutex_t *mutex, log_level level, - const wchar_t *module, const 
wchar_t *message) + const char *module, const char *message) { if (fd > 0) { mutex_lock(mutex); if (!ctl_send_int(fd, level) || - !ctl_send_wchars(fd, module, -1) || - !ctl_send_wchars(fd, message, -1)) { + !ctl_send_chars(fd, module, -1) || + !ctl_send_chars(fd, message, -1)) { mutex_unlock(mutex); clear_error(); return false; @@ -428,7 +435,7 @@ struct redirect_logger { mutex_t mutex; }; -int redirect_logger_message(struct logger *_logger, log_level level, const wchar_t *module, const wchar_t *message) +int redirect_logger_message(struct logger *_logger, log_level level, const char *module, const char *message) { struct redirect_logger *logger = (struct redirect_logger *)_logger; if (!redirect_message(logger->fd, &logger->mutex, level, module, message)) { @@ -451,7 +458,7 @@ struct redirect_logger *redirect_logger_create(d) { struct redirect_logger *logger = malloc(sizeof(struct redirect_logger)); if (!logger) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -474,7 +481,7 @@ struct redirect_alerter { bool redirect_alerter_alert(struct alerter *_alerter, uint64 id, const struct time *time, const struct alert *alert) { struct redirect_alerter *alerter = (struct redirect_alerter *)_alerter; - if (!redirect_message(alerter->fd, &alerter->mutex, HAKA_LOG_INFO, L"alert", + if (!redirect_message(alerter->fd, &alerter->mutex, HAKA_LOG_INFO, "alert", alert_tostring(id, time, alert, "", "\n\t", false))) { alerter->alerter.mark_for_remove = true; } @@ -484,7 +491,7 @@ bool redirect_alerter_alert(struct alerter *_alerter, uint64 id, const struct ti bool redirect_alerter_update(struct alerter *_alerter, uint64 id, const struct time *time, const struct alert *alert) { struct redirect_alerter *alerter = (struct redirect_alerter *)_alerter; - if (!redirect_message(alerter->fd, &alerter->mutex, HAKA_LOG_INFO, L"alert", + if (!redirect_message(alerter->fd, &alerter->mutex, HAKA_LOG_INFO, "alert", alert_tostring(id, time, alert, "update ", "\n\t", false))) { 
alerter->alerter.mark_for_remove = true; } @@ -505,7 +512,7 @@ struct redirect_alerter *redirect_alerter_create(d) { struct redirect_alerter *alerter = malloc(sizeof(struct redirect_alerter)); if (!alerter) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -561,10 +568,10 @@ static enum clt_client_rc ctl_client_process_command(struct ctl_client_state *st return CTL_CLIENT_DONE; } - messagef(HAKA_LOG_INFO, MODULE, L"setting log level to %s", level); + messagef(HAKA_LOG_INFO, MODULE, "setting log level to %s", level); if (!setup_loglevel(level)) { - const wchar_t *err = clear_error(); + const char *err = clear_error(); messagef(HAKA_LOG_ERROR, MODULE, err); ctl_send_status(state->fd, -1, err); } @@ -671,7 +678,7 @@ static enum clt_client_rc ctl_client_process_command(struct ctl_client_state *st } else { if (strlen(command) > 0) { - messagef(HAKA_LOG_ERROR, MODULE, L"invalid ctl command '%s'", command); + messagef(HAKA_LOG_ERROR, MODULE, "invalid ctl command '%s'", command); ctl_send_status(state->fd, -1, NULL); } return CTL_CLIENT_DONE; diff --git a/src/haka/haka.1 b/src/haka/haka.1 index daa9408d..5bb94e47 100644 --- a/src/haka/haka.1 +++ b/src/haka/haka.1 @@ -44,6 +44,12 @@ Override the rule configuration file. .TP \fB\-\-opt
:[=]\fP Override configuration parameter. +.TP +\fB\-\-pid-file \fP +Full path to pid file. +.TP +\fB\-\-ctl-socket \fP +Full path to socket control file. .SH FILES \fB/var/run/haka.pid\fP records the pid of haka. .br diff --git a/src/haka/haka.c b/src/haka/haka.c index 622e36d1..57f1d0c5 100644 --- a/src/haka/haka.c +++ b/src/haka/haka.c @@ -43,25 +43,31 @@ static void help(const char *program) usage(stdout, program); fprintf(stdout, "Options:\n"); - fprintf(stdout, "\t-h,--help: Display this information\n"); - fprintf(stdout, "\t--version: Display version information\n"); - fprintf(stdout, "\t-c,--config : Load a specific configuration file\n" - "\t (default: " HAKA_CONFIG ")\n"); - fprintf(stdout, "\t-r,--rule : Override the rule configuration file\n"); - fprintf(stdout, "\t-d,--debug: Display debug output\n"); + fprintf(stdout, "\t-h,--help: Display this information\n"); + fprintf(stdout, "\t--version: Display version information\n"); + fprintf(stdout, "\t-c,--config : Load a specific configuration file\n" + "\t (default: " HAKA_CONFIG ")\n"); + fprintf(stdout, "\t-r,--rule : Override the rule configuration file\n"); + fprintf(stdout, "\t-d,--debug: Display debug output\n"); fprintf(stdout, "\t--opt
:[=]:\n"); - fprintf(stdout, "\t Override configuration parameter\n"); - fprintf(stdout, "\t-l,--loglevel : Set the log level\n"); - fprintf(stdout, "\t (debug, info, warning, error or fatal)\n"); - fprintf(stdout, "\t--debug-lua: Activate lua debugging (and keep haka in foreground)\n"); - fprintf(stdout, "\t--dump-dissector-graph: Dump dissector internals (grammar and state machine) in file .dot\n"); - fprintf(stdout, "\t--no-daemon: Do no run in the background\n"); + fprintf(stdout, "\t Override configuration parameter\n"); + fprintf(stdout, "\t-l,--loglevel : Set the log level\n"); + fprintf(stdout, "\t (debug, info, warning, error or fatal)\n"); + fprintf(stdout, "\t--debug-lua: Activate lua debugging (and keep haka in foreground)\n"); + fprintf(stdout, "\t--dump-dissector-graph: Dump dissector internals (grammar and state machine) in file .dot\n"); + fprintf(stdout, "\t--no-daemon: Do no run in the background\n"); + fprintf(stdout, "\t--pid-file Full path to pid file\n" + "\t (default: " HAKA_PID_FILE ")\n"); + fprintf(stdout, "\t--ctl-file Full path to socket control file\n" + "\t (default: " HAKA_CTL_SOCKET_FILE ")\n"); } -static bool daemonize = true; +static bool daemonize = true; static char *config = NULL; -static bool lua_debugger = false; -static bool dissector_graph = false; +static bool lua_debugger = false; +static bool dissector_graph = false; +static char *pid_file_path = NULL; +static char *ctl_file_path = NULL; struct config_override { char *key; @@ -80,14 +86,14 @@ static void add_override(const char *key, const char *value) { struct config_override *override = vector_push(&config_overrides, struct config_override); if (!override) { - message(HAKA_LOG_FATAL, L"core", L"memory error"); + message(HAKA_LOG_FATAL, "core", "memory error"); exit(2); } override->key = strdup(key); override->value = strdup(value); if (!override->key || !override->value) { - message(HAKA_LOG_FATAL, L"core", L"memory error"); + message(HAKA_LOG_FATAL, "core", "memory 
error"); clean_exit(); exit(2); } @@ -109,10 +115,12 @@ static int parse_cmdline(int *argc, char ***argv) { "no-daemon", no_argument, 0, 'D' }, { "opt", required_argument, 0, 'o' }, { "rule", required_argument, 0, 'r' }, + { "pid-file", required_argument, 0, 'P' }, + { "ctl-file", required_argument, 0, 'S' }, { 0, 0, 0, 0 } }; - while ((c = getopt_long(*argc, *argv, "dl:hc:r:", long_options, &index)) != -1) { + while ((c = getopt_long(*argc, *argv, "dl:hc:r:P:S:", long_options, &index)) != -1) { switch (c) { case 'd': add_override("log:level", "debug"); @@ -138,7 +146,7 @@ static int parse_cmdline(int *argc, char ***argv) case 'c': config = strdup(optarg); if (!config) { - message(HAKA_LOG_FATAL, L"core", L"memory error"); + message(HAKA_LOG_FATAL, "core", "memory error"); clean_exit(); exit(2); } @@ -168,6 +176,24 @@ static int parse_cmdline(int *argc, char ***argv) } break; + case 'P': + pid_file_path = strdup(optarg); + if (!pid_file_path) { + message(HAKA_LOG_FATAL, "core", "memory error"); + clean_exit(); + exit(2); + } + break; + + case 'S': + ctl_file_path = strdup(optarg); + if (!ctl_file_path) { + message(HAKA_LOG_FATAL, "core", "memory error"); + clean_exit(); + exit(2); + } + break; + default: usage(stderr, (*argv)[0]); return 2; @@ -182,7 +208,25 @@ static int parse_cmdline(int *argc, char ***argv) if (!config) { config = strdup(HAKA_CONFIG); if (!config) { - message(HAKA_LOG_FATAL, L"core", L"memory error"); + message(HAKA_LOG_FATAL, "core", "memory error"); + clean_exit(); + exit(2); + } + } + + if (!pid_file_path) { + pid_file_path = strdup(HAKA_PID_FILE); + if (!pid_file_path) { + message(HAKA_LOG_FATAL, "core", "memory error"); + clean_exit(); + exit(2); + } + } + + if (!ctl_file_path) { + ctl_file_path = strdup(HAKA_CTL_SOCKET_FILE); + if (!ctl_file_path) { + message(HAKA_LOG_FATAL, "core", "memory error"); clean_exit(); exit(2); } @@ -197,7 +241,7 @@ int read_configuration(const char *file) { struct parameters *config = parameters_open(file); if 
(check_error()) { - message(HAKA_LOG_FATAL, L"core", clear_error()); + message(HAKA_LOG_FATAL, "core", clear_error()); return 2; } @@ -232,13 +276,13 @@ int read_configuration(const char *file) if (_level) { char *level = strdup(_level); if (!level) { - message(HAKA_LOG_FATAL, L"core", L"memory error"); + message(HAKA_LOG_FATAL, "core", "memory error"); clean_exit(); exit(1); } if (!setup_loglevel(level)) { - message(HAKA_LOG_FATAL, L"core", clear_error()); + message(HAKA_LOG_FATAL, "core", clear_error()); clean_exit(); exit(1); } @@ -259,21 +303,21 @@ int read_configuration(const char *file) struct module *logger_module = module_load(module, config); if (!logger_module) { - messagef(HAKA_LOG_FATAL, L"core", L"cannot load logging module: %ls", clear_error()); + messagef(HAKA_LOG_FATAL, "core", "cannot load logging module: %s", clear_error()); clean_exit(); return 1; } logger = log_module_logger(logger_module, config); if (!logger) { - messagef(HAKA_LOG_FATAL, L"core", L"cannot initialize logging module: %ls", clear_error()); + messagef(HAKA_LOG_FATAL, "core", "cannot initialize logging module: %s", clear_error()); module_release(logger_module); clean_exit(); return 1; } if (!add_logger(logger)) { - messagef(HAKA_LOG_FATAL, L"core", L"cannot install logging module: %ls", clear_error()); + messagef(HAKA_LOG_FATAL, "core", "cannot install logging module: %s", clear_error()); logger->destroy(logger); module_release(logger_module); clean_exit(); @@ -295,21 +339,21 @@ int read_configuration(const char *file) struct module *alerter_module = module_load(module, config); if (!alerter_module) { - messagef(HAKA_LOG_FATAL, L"core", L"cannot load alert module: %ls", clear_error()); + messagef(HAKA_LOG_FATAL, "core", "cannot load alert module: %s", clear_error()); clean_exit(); return 1; } alerter = alert_module_alerter(alerter_module, config); if (!alerter) { - messagef(HAKA_LOG_FATAL, L"core", L"cannot initialize alert module: %ls", clear_error()); + messagef(HAKA_LOG_FATAL, 
"core", "cannot initialize alert module: %s", clear_error()); module_release(alerter_module); clean_exit(); return 1; } if (!add_alerter(alerter)) { - messagef(HAKA_LOG_FATAL, L"core", L"cannot install alert module: %ls", clear_error()); + messagef(HAKA_LOG_FATAL, "core", "cannot install alert module: %s", clear_error()); alerter->destroy(alerter); module_release(alerter_module); clean_exit(); @@ -330,7 +374,7 @@ int read_configuration(const char *file) module = module_load("alert/file", NULL); if (!module) { - messagef(HAKA_LOG_FATAL, L"core", L"cannot load default alert module: %ls", clear_error()); + messagef(HAKA_LOG_FATAL, "core", "cannot load default alert module: %s", clear_error()); clean_exit(); return 1; } @@ -350,7 +394,7 @@ int read_configuration(const char *file) if (module) { struct module *packet = module_load(module, config); if (!packet) { - messagef(HAKA_LOG_FATAL, L"core", L"cannot load packet module: %ls", clear_error()); + messagef(HAKA_LOG_FATAL, "core", "cannot load packet module: %s", clear_error()); clean_exit(); return 1; } @@ -359,7 +403,7 @@ int read_configuration(const char *file) module_release(packet); } else { - message(HAKA_LOG_FATAL, L"core", L"no packet module specified"); + message(HAKA_LOG_FATAL, "core", "no packet module specified"); clean_exit(); return 1; } @@ -371,7 +415,7 @@ int read_configuration(const char *file) { const char *configuration = parameters_get_string(config, "configuration", NULL); if (!configuration) { - message(HAKA_LOG_FATAL, L"core", L"no configuration specified"); + message(HAKA_LOG_FATAL, "core", "no configuration specified"); clean_exit(); return 1; } @@ -392,7 +436,7 @@ void clean_exit() stop_ctl_server(); if (haka_started) { - unlink(HAKA_PID_FILE); + unlink(pid_file_path); } vector_destroy(&config_overrides); @@ -403,13 +447,13 @@ void clean_exit() bool check_running_haka() { pid_t pid; - FILE *pid_file = fopen(HAKA_PID_FILE, "r"); + FILE *pid_file = fopen(pid_file_path, "r"); if (!pid_file) { 
return false; } if (fscanf(pid_file, "%i", &pid) != 1) { - message(HAKA_LOG_WARNING, L"core", L"malformed pid file"); + message(HAKA_LOG_WARNING, "core", "malformed pid file"); return false; } @@ -417,7 +461,7 @@ bool check_running_haka() return false; } - message(HAKA_LOG_FATAL, L"core", L"an instance of haka is already running"); + message(HAKA_LOG_FATAL, "core", "an instance of haka is already running"); return true; } @@ -442,6 +486,8 @@ int main(int argc, char *argv[]) ret = parse_cmdline(&argc, &argv); if (ret >= 0) { free(config); + free(pid_file_path); + free(ctl_file_path); clean_exit(); return ret; } @@ -460,7 +506,9 @@ int main(int argc, char *argv[]) haka_started = true; - if (!prepare_ctl_server()) { + ret = prepare_ctl_server(ctl_file_path); + free(ctl_file_path); + if (!ret) { clean_exit(); return 2; } @@ -468,7 +516,7 @@ int main(int argc, char *argv[]) { struct luadebug_user *user = luadebug_user_readline(); if (!user) { - message(HAKA_LOG_FATAL, L"core", L"cannot create readline handler"); + message(HAKA_LOG_FATAL, "core", "cannot create readline handler"); clean_exit(); return 2; } @@ -483,8 +531,7 @@ int main(int argc, char *argv[]) child = fork(); if (child == -1) { - message(HAKA_LOG_FATAL, L"core", L"failed to daemonize"); - fclose(pid_file); + message(HAKA_LOG_FATAL, "core", "failed to daemonize"); clean_exit(); return 1; } @@ -504,9 +551,9 @@ int main(int argc, char *argv[]) prepare(-1, lua_debugger, dissector_graph); - pid_file = fopen(HAKA_PID_FILE, "w"); + pid_file = fopen(pid_file_path, "w"); if (!pid_file) { - message(HAKA_LOG_FATAL, L"core", L"cannot create pid file"); + message(HAKA_LOG_FATAL, "core", "cannot create pid file"); clean_exit(); return 1; } @@ -527,12 +574,12 @@ int main(int argc, char *argv[]) luadebug_debugger_user(NULL); luadebug_interactive_user(NULL); - message(HAKA_LOG_INFO, L"core", L"switch to background"); + message(HAKA_LOG_INFO, "core", "switch to background"); { const int nullfd = open("/dev/null", O_RDWR); 
if (nullfd == -1) { - message(HAKA_LOG_FATAL, L"core", L"failed to daemonize"); + message(HAKA_LOG_FATAL, "core", "failed to daemonize"); fclose(pid_file); clean_exit(); return 1; @@ -550,7 +597,7 @@ int main(int argc, char *argv[]) start(); - message(HAKA_LOG_INFO, L"core", L"stopping haka"); + message(HAKA_LOG_INFO, "core", "stopping haka"); clean_exit(); return 0; diff --git a/src/haka/hakapcap.c b/src/haka/hakapcap.c index 2cc37ca4..f6cab49c 100644 --- a/src/haka/hakapcap.c +++ b/src/haka/hakapcap.c @@ -80,7 +80,7 @@ static int parse_cmdline(int *argc, char ***argv) case 'l': if (!setup_loglevel(optarg)) { - message(HAKA_LOG_FATAL, L"core", clear_error()); + message(HAKA_LOG_FATAL, "core", clear_error()); clean_exit(); exit(1); } @@ -176,7 +176,7 @@ int main(int argc, char *argv[]) args = NULL; if (!pcap) { - messagef(HAKA_LOG_FATAL, L"core", L"cannot load packet module: %ls", clear_error()); + messagef(HAKA_LOG_FATAL, "core", "cannot load packet module: %s", clear_error()); clean_exit(); return 1; } @@ -202,14 +202,14 @@ int main(int argc, char *argv[]) module = module_load("alert/file", NULL); if (!module) { - messagef(HAKA_LOG_FATAL, L"core", L"cannot load alert module: %ls", clear_error()); + messagef(HAKA_LOG_FATAL, "core", "cannot load alert module: %s", clear_error()); clean_exit(); return 1; } alerter = alert_module_alerter(module, args); if (!alerter) { - messagef(HAKA_LOG_FATAL, L"core", L"cannot load alert module: %ls", clear_error()); + messagef(HAKA_LOG_FATAL, "core", "cannot load alert module: %s", clear_error()); clean_exit(); return 1; } @@ -226,7 +226,7 @@ int main(int argc, char *argv[]) { struct luadebug_user *user = luadebug_user_readline(); if (!user) { - message(HAKA_LOG_FATAL, L"core", L"cannot create readline handler"); + message(HAKA_LOG_FATAL, "core", "cannot create readline handler"); clean_exit(); return 2; } diff --git a/src/haka/test/CMakeLists.txt b/src/haka/test/CMakeLists.txt index 023d8718..99dc8a5b 100644 --- 
a/src/haka/test/CMakeLists.txt +++ b/src/haka/test/CMakeLists.txt @@ -5,6 +5,23 @@ # Tests include(TestPcap) +add_custom_target(bench + COMMAND ${CMAKE_COMMAND} + -DCTEST_MODULE_DIR=${CTEST_MODULE_DIR} + -DPROJECT_SOURCE_DIR=${CMAKE_SOURCE_DIR} + -DEXE=${CMAKE_CURRENT_SOURCE_DIR}/hakabench + -DEXE_OPTIONS=${TEST_BENCH_OPTIONS} + -DBENCH=${CMAKE_CURRENT_SOURCE_DIR}/benchmark + -DHAKA_PATH=${TEST_RUNDIR} + -DCONF=${CMAKE_BINARY_DIR}/test/etc/haka/haka.conf + -P ${CTEST_MODULE_DIR}/TestBenchRun.cmake + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + COMMENT "Run benchmark test" VERBATIM + DEPENDS pretests +) + +install(PROGRAMS hakabench DESTINATION bin) + TEST_PCAP(core rule-error) TEST_PCAP(core hakaevents) TEST_PCAP(core timestamp) diff --git a/src/haka/test/benchmark/tcp-big-100000.pcap b/src/haka/test/benchmark/tcp-big-100000.pcap new file mode 100644 index 00000000..f6215e11 Binary files /dev/null and b/src/haka/test/benchmark/tcp-big-100000.pcap differ diff --git a/src/haka/test/benchmark/tcp-big.lua b/src/haka/test/benchmark/tcp-big.lua new file mode 120000 index 00000000..b01f8264 --- /dev/null +++ b/src/haka/test/benchmark/tcp-big.lua @@ -0,0 +1 @@ +tcp.lua \ No newline at end of file diff --git a/src/haka/test/benchmark/tcp-small-100000.pcap b/src/haka/test/benchmark/tcp-small-100000.pcap new file mode 100644 index 00000000..333afefe Binary files /dev/null and b/src/haka/test/benchmark/tcp-small-100000.pcap differ diff --git a/src/haka/test/benchmark/tcp-small.lua b/src/haka/test/benchmark/tcp-small.lua new file mode 120000 index 00000000..b01f8264 --- /dev/null +++ b/src/haka/test/benchmark/tcp-small.lua @@ -0,0 +1 @@ +tcp.lua \ No newline at end of file diff --git a/src/haka/test/benchmark/tcp.lua b/src/haka/test/benchmark/tcp.lua new file mode 100644 index 00000000..aed47fee --- /dev/null +++ b/src/haka/test/benchmark/tcp.lua @@ -0,0 +1,8 @@ +local ipv4 = require('protocol/ipv4') +local tcp = require('protocol/tcp') + +haka.rule{ + hook = 
tcp.events.receive_packet, + eval = function (pkt) + end +} diff --git a/src/haka/test/benchmark/udp-big-100000.pcap b/src/haka/test/benchmark/udp-big-100000.pcap new file mode 100644 index 00000000..636126bd Binary files /dev/null and b/src/haka/test/benchmark/udp-big-100000.pcap differ diff --git a/src/haka/test/benchmark/udp-big.lua b/src/haka/test/benchmark/udp-big.lua new file mode 120000 index 00000000..ed5e2b28 --- /dev/null +++ b/src/haka/test/benchmark/udp-big.lua @@ -0,0 +1 @@ +udp.lua \ No newline at end of file diff --git a/src/haka/test/benchmark/udp-small-100000.pcap b/src/haka/test/benchmark/udp-small-100000.pcap new file mode 100644 index 00000000..c10b48dc Binary files /dev/null and b/src/haka/test/benchmark/udp-small-100000.pcap differ diff --git a/src/haka/test/benchmark/udp-small.lua b/src/haka/test/benchmark/udp-small.lua new file mode 120000 index 00000000..ed5e2b28 --- /dev/null +++ b/src/haka/test/benchmark/udp-small.lua @@ -0,0 +1 @@ +udp.lua \ No newline at end of file diff --git a/src/haka/test/benchmark/udp.lua b/src/haka/test/benchmark/udp.lua new file mode 100644 index 00000000..577b367e --- /dev/null +++ b/src/haka/test/benchmark/udp.lua @@ -0,0 +1,12 @@ +-- This Source Code Form is subject to the terms of the Mozilla Public +-- License, v. 2.0. If a copy of the MPL was not distributed with this +-- file, You can obtain one at http://mozilla.org/MPL/2.0/. + +local ipv4 = require('protocol/ipv4') +local udp = require('protocol/udp') + +haka.rule{ + hook = udp.events.receive_packet, + eval = function (pkt) + end +} diff --git a/src/haka/test/hakabench b/src/haka/test/hakabench new file mode 100755 index 00000000..de9da411 --- /dev/null +++ b/src/haka/test/hakabench @@ -0,0 +1,156 @@ +#!/bin/sh +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+set -e + +PAD=".................................................." +AWK_RESULT='$0 ~ /^info benchmark: processing/ { print $4" "$7" "$10; }' +AWK_PROGRESS='$0 ~ /^info benchmark: progress/ { + printf " %5s %%\033[8D", $4 + fflush() +} +$0 ~ /^info benchmark: processing/ { + printf "Passed %13s Mib/s\033[K\n", $10 +}' + +_usage() { + echo "Usage: $1 [-vjkh] " + echo "Options:" + echo "\t-j : Run haka on threads" + echo "\t-o : Output benchmark results to " + echo "\t-v: Display debug output" + echo "\t-k: Keep configuration files" + echo "\t-h: Display this help" +} + +_error() { + printf "$@" 1>&2 +} + +_info() { + printf "$@" +} + +_debug() { + if $DEBUG; then + printf "$@" + fi +} + +_bench_dir() { + for TARGET in $1/*.pcap; do + _do_bench $TARGET + done +} + +_gen_conf() { + CONF=`mktemp $1` + cat > $CONF <&1 | tee $OUT | gawk "$AWK_PROGRESS" + RET=$? + set -e + + if $RM_CONFIG; then + rm $CONF + fi + + if [ $RET -ne 0 ]; then + _info "\033[KFailed\n" + RESULT="$RESULT$NAME Failed\n" + return + fi + + RES=`gawk "$AWK_RESULT" $OUT` + RESULT="$RESULT$NAME $THREAD $RES\n" +} + +THREAD=`nproc` +DEBUG=false +RM_CONFIG=true +RESULT_FILE="hakabench-result-`date +%FT%T`.txt" + +while getopts "vkhj:o:" OPT; do + case $OPT in + v) + DEBUG=true + ;; + j) + THREAD="$OPTARG" + ;; + k) + RM_CONFIG=false + ;; + o) + RESULT_FILE="$OPTARG" + ;; + h|\?) 
+ _usage $0 + exit 0 + ;; + esac +done + +shift $((OPTIND-1)) + +if [ -z "$1" ]; then + _error "Missing benchmark directory or file.\n" + _usage $0 + exit 1 +fi + +for TARGET in "$@"; do + if [ -d $TARGET ]; then + _bench_dir $TARGET + elif [ -f $TARGET ]; then + _do_bench $TARGET + else + _error "invalid target $TARGET\n" + fi +done + +# Save result +echo -n $RESULT > $RESULT_FILE + +_info "\nResults (also in $RESULT_FILE):\n" +echo $RESULT diff --git a/src/haka/thread.c b/src/haka/thread.c index 0c5d1b98..4ed64a52 100644 --- a/src/haka/thread.c +++ b/src/haka/thread.c @@ -76,12 +76,12 @@ static void filter_wrapper(struct thread_state *state, struct packet *pkt) if (!lua_isnil(state->lua->L, -1)) { if (!lua_pushppacket(state->lua->L, pkt)) { - message(HAKA_LOG_ERROR, L"core", L"packet internal error"); + message(HAKA_LOG_ERROR, "core", "packet internal error"); packet_drop(pkt); } else { if (lua_pcall(state->lua->L, 1, 0, h)) { - lua_state_print_error(state->lua->L, L"filter"); + lua_state_print_error(state->lua->L, "filter"); packet_drop(pkt); } } @@ -147,11 +147,11 @@ static struct thread_state *init_thread_state(struct packet_module *packet_modul state->state = STATE_NOTSARTED; state->engine = NULL; - messagef(HAKA_LOG_INFO, L"core", L"initializing thread %d", thread_id); + messagef(HAKA_LOG_INFO, "core", "initializing thread %d", thread_id); state->lua = lua_state_init(); if (!state->lua) { - message(HAKA_LOG_FATAL, L"core", L"unable to create lua state"); + message(HAKA_LOG_FATAL, "core", "unable to create lua state"); cleanup_thread_state(state); return NULL; } @@ -176,7 +176,7 @@ static struct thread_state *init_thread_state(struct packet_module *packet_modul state->capture = packet_module->init_state(thread_id); if (!state->capture) { - message(HAKA_LOG_FATAL, L"core", L"unable to create packet capture state"); + message(HAKA_LOG_FATAL, "core", "unable to create packet capture state"); cleanup_thread_state(state); return NULL; } @@ -200,7 +200,7 @@ static 
bool init_thread_lua_state(struct thread_state *state) lua_getglobal(state->lua->L, "require"); lua_pushstring(state->lua->L, "rule"); if (lua_pcall(state->lua->L, 1, 0, h)) { - lua_state_print_error(state->lua->L, L"init"); + lua_state_print_error(state->lua->L, "init"); lua_pop(state->lua->L, 1); LUA_STACK_CHECK(state->lua->L, 0); @@ -215,7 +215,7 @@ static bool init_thread_lua_state(struct thread_state *state) lua_getglobal(state->lua->L, "haka"); lua_getfield(state->lua->L, -1, "rule_summary"); if (lua_pcall(state->lua->L, 0, 0, h)) { - lua_state_print_error(state->lua->L, L"init"); + lua_state_print_error(state->lua->L, "init"); lua_pop(state->lua->L, 1); LUA_STACK_CHECK(state->lua->L, 0); @@ -243,14 +243,14 @@ static void *thread_main_loop(void *_state) sigdelset(&set, SIGFPE); if (!thread_sigmask(SIG_BLOCK, &set, NULL)) { - message(HAKA_LOG_FATAL, L"core", clear_error()); + message(HAKA_LOG_FATAL, "core", clear_error()); barrier_wait(&state->pool->thread_start_sync); state->state = STATE_ERROR; return NULL; } if (!timer_init_thread()) { - message(HAKA_LOG_FATAL, L"core", clear_error()); + message(HAKA_LOG_FATAL, "core", clear_error()); barrier_wait(&state->pool->thread_start_sync); state->state = STATE_ERROR; return NULL; @@ -259,7 +259,7 @@ static void *thread_main_loop(void *_state) /* To make sure we can still cancel even if some thread are locked in * infinite loops */ if (!thread_setcanceltype(THREAD_CANCEL_ASYNCHRONOUS)) { - message(HAKA_LOG_FATAL, L"core", clear_error()); + message(HAKA_LOG_FATAL, "core", clear_error()); barrier_wait(&state->pool->thread_start_sync); state->state = STATE_ERROR; return NULL; @@ -279,7 +279,7 @@ static void *thread_main_loop(void *_state) if (!state->pool->single) { if (!barrier_wait(&state->pool->thread_start_sync)) { - message(HAKA_LOG_FATAL, L"core", clear_error()); + message(HAKA_LOG_FATAL, "core", clear_error()); state->state = STATE_ERROR; engine_thread_update_status(state->engine, THREAD_DEFUNC); return NULL; @@ 
-288,7 +288,7 @@ static void *thread_main_loop(void *_state) if (!state->pool->single) { if (!barrier_wait(&state->pool->thread_sync)) { - message(HAKA_LOG_FATAL, L"core", clear_error()); + message(HAKA_LOG_FATAL, "core", clear_error()); state->state = STATE_ERROR; engine_thread_update_status(state->engine, THREAD_DEFUNC); return NULL; @@ -340,7 +340,7 @@ struct thread_pool *thread_pool_create(int count, struct packet_module *packet_m pool = malloc(sizeof(struct thread_pool)); if (!pool) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -348,7 +348,7 @@ struct thread_pool *thread_pool_create(int count, struct packet_module *packet_m pool->threads = malloc(sizeof(struct thread_state*)*count); if (!pool) { - error(L"memory error"); + error("memory error"); thread_pool_cleanup(pool); return NULL; } @@ -376,7 +376,7 @@ struct thread_pool *thread_pool_create(int count, struct packet_module *packet_m for (i=0; ithreads[i] = init_thread_state(packet_module, i, dissector_graph); if (!pool->threads[i]) { - error(L"thread initialization error"); + error("thread initialization error"); thread_pool_cleanup(pool); return NULL; } @@ -385,7 +385,7 @@ struct thread_pool *thread_pool_create(int count, struct packet_module *packet_m if (pool->single) { if (!init_thread_lua_state(pool->threads[i])) { - error(L"thread initialization error"); + error("thread initialization error"); thread_pool_cleanup(pool); return NULL; } @@ -404,7 +404,7 @@ struct thread_pool *thread_pool_create(int count, struct packet_module *packet_m } if (pool->threads[i]->state == STATE_ERROR) { - error(L"thread initialization error"); + error("thread initialization error"); thread_pool_cleanup(pool); return NULL; } @@ -455,7 +455,7 @@ void thread_pool_wait(struct thread_pool *pool) pool->threads[i]->state != STATE_JOINED) { void *ret; if (!thread_join(pool->threads[i]->thread, &ret)) { - message(HAKA_LOG_FATAL, L"core", clear_error()); + message(HAKA_LOG_FATAL, "core", clear_error()); } 
pool->threads[i]->state = STATE_JOINED; } @@ -470,7 +470,7 @@ void thread_pool_cancel(struct thread_pool *pool) for (i=0; icount; ++i) { if (pool->threads[i] && pool->threads[i]->state == STATE_RUNNING) { if (!thread_cancel(pool->threads[i]->thread)) { - message(HAKA_LOG_FATAL, L"core", clear_error()); + message(HAKA_LOG_FATAL, "core", clear_error()); } pool->threads[i]->state = STATE_CANCELED; } @@ -494,7 +494,7 @@ void thread_pool_start(struct thread_pool *pool) thread_pool_wait(pool); } else { - error(L"no thread to run"); + error("no thread to run"); } } diff --git a/src/hakactl/commands.c b/src/hakactl/commands.c index 0cfd9790..a5d8d22e 100644 --- a/src/hakactl/commands.c +++ b/src/hakactl/commands.c @@ -18,10 +18,10 @@ static int check_status(int fd, const char *format, ...) { if (ctl_recv_status(fd) == -1) { - const wchar_t *err = clear_error(); - if (!err) err = L"failed!"; + const char *err = clear_error(); + if (!err) err = "failed!"; - printf(": %s%ls%s", c(RED, use_colors), err, c(CLEAR, use_colors)); + printf(": %s%s%s", c(RED, use_colors), err, c(CLEAR, use_colors)); printf("\r[%sFAIL%s]\n", c(RED, use_colors), c(CLEAR, use_colors)); return COMMAND_FAILED; } @@ -87,19 +87,19 @@ struct command command_stop = { static bool display_log_line(int fd) { log_level level; - wchar_t *module, *msg; + char *module, *msg; level = ctl_recv_int(fd); if (check_error()) { return false; } - module = ctl_recv_wchars(fd, NULL); + module = ctl_recv_chars(fd, NULL); if (!module) { return false; } - msg = ctl_recv_wchars(fd, NULL); + msg = ctl_recv_chars(fd, NULL); if (!msg) { return false; } @@ -185,14 +185,14 @@ static int run_remote(int fd, const char *command) struct luadebug_user *readline_user = luadebug_user_readline(); if (!readline_user) { - printf(": %ls", clear_error()); + printf(": %s", clear_error()); printf("\r[%sFAIL%s]\n", c(RED, use_colors), c(CLEAR, use_colors)); } if (check_status(fd, NULL) == COMMAND_SUCCESS) { luadebug_user_remote_server(fd, 
readline_user); if (check_error()) { - message(HAKA_LOG_FATAL, L"debug", clear_error()); + message(HAKA_LOG_FATAL, "debug", clear_error()); return COMMAND_FAILED; } return COMMAND_SUCCESS; diff --git a/src/hakactl/console.c b/src/hakactl/console.c index 99dc5ce3..bb624787 100644 --- a/src/hakactl/console.c +++ b/src/hakactl/console.c @@ -44,7 +44,7 @@ bool initialize_console(struct lua_state *state) console_path = malloc(size); if (!console_path) { - error(L"memory error"); + error("memory error"); return false; } @@ -52,7 +52,7 @@ bool initialize_console(struct lua_state *state) dir = opendir(console_path); if (!dir) { - error(L"cannot open console script folder: %s", console_path); + error("cannot open console script folder: %s", console_path); return false; } else { @@ -75,11 +75,11 @@ bool initialize_console(struct lua_state *state) continue; } - messagef(HAKA_LOG_DEBUG, L"hakactl", L"loading console script '%s'", entry.d_name); + messagef(HAKA_LOG_DEBUG, "hakactl", "loading console script '%s'", entry.d_name); if (luaL_dofile(state->L, fullfilename)) { const char *msg = lua_tostring(state->L, -1); - messagef(HAKA_LOG_ERROR, L"hakactl", L"cannot open console script '%s': %s", + messagef(HAKA_LOG_ERROR, "hakactl", "cannot open console script '%s': %s", entry.d_name, msg); lua_pop(state->L, 1); } @@ -108,7 +108,7 @@ static int run_console(int fd, int argc, char *argv[]) state = lua_state_init(); if (!state) { - messagef(HAKA_LOG_FATAL, L"hakactl", clear_error()); + messagef(HAKA_LOG_FATAL, "hakactl", clear_error()); return COMMAND_FAILED; } @@ -119,14 +119,14 @@ static int run_console(int fd, int argc, char *argv[]) lua_setglobal(state->L, "hakactl"); if (!initialize_console(state)) { - messagef(HAKA_LOG_FATAL, L"hakactl", clear_error()); + messagef(HAKA_LOG_FATAL, "hakactl", clear_error()); lua_state_close(state); return COMMAND_FAILED; } user = luadebug_user_readline(); if (!user) { - messagef(HAKA_LOG_FATAL, L"hakactl", clear_error()); + 
messagef(HAKA_LOG_FATAL, "hakactl", clear_error()); lua_state_close(state); return COMMAND_FAILED; } diff --git a/src/hakactl/ctl_comm.c b/src/hakactl/ctl_comm.c index ab013c2a..1e98895e 100644 --- a/src/hakactl/ctl_comm.c +++ b/src/hakactl/ctl_comm.c @@ -21,15 +21,15 @@ static bool ctl_check_error(int err, int expected, bool forread) { if (err == 0 && forread) { - error(L"end of file"); + error("end of file"); return false; } else if (err <= 0) { - error(L"%s", errno_error(errno)); + error("%s", errno_error(errno)); return false; } else if (err != expected) { - error(L"communication error"); + error("communication error"); return false; } return true; @@ -77,13 +77,13 @@ bool ctl_send_int(int fd, int32 i) return true; } -bool ctl_send_status(int fd, int ret, const wchar_t *err) +bool ctl_send_status(int fd, int ret, const char *err) { if (ret == -1) { - if (!err) err = L""; + if (!err) err = ""; if (!ctl_send_int(fd, ret)) return false; - return ctl_send_wchars(fd, err, -1); + return ctl_send_chars(fd, err, -1); } else { return ctl_send_int(fd, ret); @@ -98,7 +98,7 @@ int ctl_recv_status(int fd) } if (ret == -1) { - wchar_t *err = ctl_recv_wchars(fd, NULL); + char *err = ctl_recv_chars(fd, NULL); if (!err) { assert(check_error()); return -1; @@ -121,7 +121,7 @@ char *ctl_recv_chars(int fd, size_t *_len) str = malloc(len+1); if (!str) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -145,13 +145,13 @@ wchar_t *ctl_recv_wchars(int fd, size_t *_len) } if (len < 0) { - error(L"communication error"); + error("communication error"); return NULL; } str = malloc(sizeof(wchar_t)*(len+1)); if (!str) { - error(L"memory error"); + error("memory error"); return NULL; } @@ -190,7 +190,7 @@ bool ctl_expect_chars(int fd, const char *str) return false; } if (len < 0) { - error(L"communication error"); + error("communication error"); return false; } diff --git a/src/hakactl/ctl_comm.h b/src/hakactl/ctl_comm.h index 0ca55ec6..5e0dd183 100644 --- 
a/src/hakactl/ctl_comm.h +++ b/src/hakactl/ctl_comm.h @@ -11,7 +11,7 @@ bool ctl_send_chars(int fd, const char *str, size_t len); bool ctl_send_wchars(int fd, const wchar_t *str, size_t len); bool ctl_send_int(int fd, int32 i); -bool ctl_send_status(int fd, int ret, const wchar_t *err); +bool ctl_send_status(int fd, int ret, const char *err); char *ctl_recv_chars(int fd, size_t *len); wchar_t *ctl_recv_wchars(int fd, size_t *len); diff --git a/src/hakactl/hakactl.c b/src/hakactl/hakactl.c index 994606da..9305ecd8 100644 --- a/src/hakactl/hakactl.c +++ b/src/hakactl/hakactl.c @@ -55,9 +55,10 @@ static void help(const char *program) usage(stdout, program); fprintf(stdout, "Options:\n"); - fprintf(stdout, "\t-h,--help: Display this information\n"); - fprintf(stdout, "\t--version: Display version information\n"); - fprintf(stdout, "\t-d,--debug: Display debug output\n"); + fprintf(stdout, "\t-h,--help: Display this information\n"); + fprintf(stdout, "\t--version: Display version information\n"); + fprintf(stdout, "\t-d,--debug: Display debug output\n"); + fprintf(stdout, "\t--ctl-file Full path to socket control file\n"); fprintf(stdout, "\nCommands:\n"); while (*iter) { @@ -66,6 +67,12 @@ static void help(const char *program) } } +static char *ctl_file_path = NULL; + +static void clean_exit() +{ +} + static int parse_cmdline(int *argc, char ***argv) { int c; @@ -74,6 +81,7 @@ static int parse_cmdline(int *argc, char ***argv) { "version", no_argument, 0, 'v' }, { "help", no_argument, 0, 'h' }, { "debug", no_argument, 0, 'd' }, + { "ctl-file", required_argument, 0, 'S' }, { 0, 0, 0, 0 } }; @@ -92,6 +100,15 @@ static int parse_cmdline(int *argc, char ***argv) setlevel(HAKA_LOG_DEBUG, NULL); break; + case 'S': + ctl_file_path = strdup(optarg); + if (!ctl_file_path) { + message(HAKA_LOG_FATAL, "core", "memory error"); + clean_exit(); + exit(2); + } + break; + default: usage(stderr, (*argv)[0]); return ERROR_INVALID_OPTIONS; @@ -103,15 +120,20 @@ static int 
parse_cmdline(int *argc, char ***argv) return ERROR_INVALID_OPTIONS; } + if (!ctl_file_path) { + ctl_file_path = strdup(HAKA_CTL_SOCKET_FILE); + if (!ctl_file_path) { + message(HAKA_LOG_FATAL, "core", "memory error"); + clean_exit(); + exit(2); + } + } + *argc -= optind; *argv += optind; return -1; } -static void clean_exit() -{ -} - static int ctl_open_socket() { int fd; @@ -124,7 +146,7 @@ static int ctl_open_socket() bzero((char *)&addr, sizeof(addr)); addr.sun_family = AF_UNIX; - strcpy(addr.sun_path, HAKA_CTL_SOCKET_FILE); + strcpy(addr.sun_path, ctl_file_path); len = strlen(addr.sun_path) + sizeof(addr.sun_family); if (connect(fd, (struct sockaddr *)&addr, len)) { @@ -146,12 +168,13 @@ int main(int argc, char *argv[]) ret = parse_cmdline(&argc, &argv); if (ret >= 0) { + free(ctl_file_path); clean_exit(); return ret; } if (!module_set_default_path()) { - fprintf(stderr, "%ls\n", clear_error()); + fprintf(stderr, "%s\n", clear_error()); clean_exit(); exit(1); } diff --git a/src/hakactl/lua/hakactl.i b/src/hakactl/lua/hakactl.i index 1a27933b..728a0833 100644 --- a/src/hakactl/lua/hakactl.i +++ b/src/hakactl/lua/hakactl.i @@ -43,21 +43,21 @@ code = lua_marshal(L, 2, &codesize); if (!code) { - lua_pushwstring(L, clear_error()); + lua_pushstring(L, clear_error()); lua_error(L); return 0; } /* Send the remote command */ if (!ctl_send_chars(console_fd, "EXECUTE", -1)) { - lua_pushwstring(L, clear_error()); + lua_pushstring(L, clear_error()); lua_error(L); return 0; } if (!ctl_send_int(console_fd, thread_id) || !ctl_send_chars(console_fd, code, codesize)) { - lua_pushwstring(L, clear_error()); + lua_pushstring(L, clear_error()); lua_error(L); return 0; } @@ -73,7 +73,7 @@ while (true) { answer = ctl_recv_status(console_fd); if (answer == -1) { - lua_pushwstring(L, clear_error()); + lua_pushstring(L, clear_error()); lua_error(L); return 0; } @@ -83,14 +83,14 @@ code = ctl_recv_chars(console_fd, &codesize); if (!code) { - lua_pushwstring(L, clear_error()); + 
lua_pushstring(L, clear_error()); lua_error(L); return 0; } if (!lua_unmarshal(L, code, codesize)) { free(code); - lua_pushwstring(L, clear_error()); + lua_pushstring(L, clear_error()); lua_error(L); return 0; } diff --git a/version.cmake b/version.cmake index 7e6fb13b..9b2eb3a2 100644 --- a/version.cmake +++ b/version.cmake @@ -4,4 +4,4 @@ set(HAKA_VERSION_MAJOR 0) set(HAKA_VERSION_MINOR 2) -set(HAKA_VERSION_PATCH 0) +set(HAKA_VERSION_PATCH 1)