From 94e8ca09e0c22c4b6232b344c1f40f9ea568fbb8 Mon Sep 17 00:00:00 2001 From: Robert Baldyga Date: Fri, 29 Mar 2019 08:39:34 +0100 Subject: [PATCH] Initial commit Signed-off-by: Robert Baldyga --- .gitmodules | 3 + Makefile | 15 + casadm/Makefile | 167 + casadm/argp.c | 803 +++++ casadm/argp.h | 133 + casadm/cas_lib.c | 2860 +++++++++++++++++ casadm/cas_lib.h | 297 ++ casadm/cas_lib_utils.c | 535 +++ casadm/cas_lib_utils.h | 68 + casadm/cas_main.c | 2024 ++++++++++++ casadm/csvparse.c | 483 +++ casadm/csvparse.h | 103 + casadm/extended_err_msg.c | 244 ++ casadm/extended_err_msg.h | 6 + casadm/intvector.c | 110 + casadm/intvector.h | 40 + casadm/ocf_env.h | 27 + casadm/ocf_env_headers.h | 22 + casadm/psort.c | 198 ++ casadm/psort.h | 22 + casadm/safeclib/ignore_handler_s.c | 72 + casadm/safeclib/mem_primitives_lib.c | 853 +++++ casadm/safeclib/mem_primitives_lib.h | 74 + casadm/safeclib/memcpy_s.c | 157 + casadm/safeclib/memmove_s.c | 148 + casadm/safeclib/memset_s.c | 105 + casadm/safeclib/safe_lib.h | 61 + casadm/safeclib/safe_lib_errno.h | 91 + casadm/safeclib/safe_mem_constraint.c | 142 + casadm/safeclib/safe_mem_constraint.h | 46 + casadm/safeclib/safe_mem_lib.h | 57 + casadm/safeclib/safe_str_constraint.c | 146 + casadm/safeclib/safe_str_constraint.h | 64 + casadm/safeclib/safe_str_lib.h | 71 + casadm/safeclib/safe_types.h | 59 + casadm/safeclib/safeclib_private.h | 93 + casadm/safeclib/strncpy_s.c | 238 ++ casadm/safeclib/strnlen_s.c | 117 + casadm/safeclib/strtok_s.c | 323 ++ casadm/statistics_model.c | 1308 ++++++++ casadm/statistics_view.c | 117 + casadm/statistics_view.h | 79 + casadm/statistics_view_csv.c | 320 ++ casadm/statistics_view_csv.h | 18 + casadm/statistics_view_raw_csv.c | 49 + casadm/statistics_view_raw_csv.h | 18 + casadm/statistics_view_structs.h | 29 + casadm/statistics_view_text.c | 1025 ++++++ casadm/statistics_view_text.h | 18 + casadm/table.c | 231 ++ casadm/table.h | 58 + casadm/upgrade.c | 40 + casadm/upgrade.h | 11 + 
casadm/vt100codes.h | 119 + modules/CAS_VERSION_GEN | 51 + modules/Makefile | 76 + modules/README | 17 + modules/cas_cache/.gitignore | 3 + modules/cas_cache/Makefile | 10 + modules/cas_cache/cas_cache.h | 97 + modules/cas_cache/classifier.c | 967 ++++++ modules/cas_cache/classifier.h | 33 + modules/cas_cache/classifier_defs.h | 139 + modules/cas_cache/context.c | 482 +++ modules/cas_cache/context.h | 79 + modules/cas_cache/control.c | 80 + modules/cas_cache/control.h | 11 + modules/cas_cache/layer_cache_management.c | 1863 +++++++++++ .../cas_cache/layer_cache_management.c.orig | 1615 ++++++++++ modules/cas_cache/layer_cache_management.h | 92 + modules/cas_cache/layer_upgrade.c | 1495 +++++++++ modules/cas_cache/layer_upgrade.h | 46 + modules/cas_cache/linux_kernel_version.h | 624 ++++ modules/cas_cache/main.c | 210 ++ modules/cas_cache/ocf_env.c | 284 ++ modules/cas_cache/ocf_env.h | 584 ++++ modules/cas_cache/ocf_env_headers.h | 21 + modules/cas_cache/service_ui_ioctl.c | 414 +++ modules/cas_cache/service_ui_ioctl.h | 15 + modules/cas_cache/threads.c | 281 ++ modules/cas_cache/threads.h | 26 + modules/cas_cache/utils/cas_cache_utils.h | 13 + modules/cas_cache/utils/utils_blk.c | 22 + modules/cas_cache/utils/utils_blk.h | 14 + modules/cas_cache/utils/utils_data.c | 130 + modules/cas_cache/utils/utils_data.h | 31 + modules/cas_cache/utils/utils_gc.c | 78 + modules/cas_cache/utils/utils_gc.h | 16 + modules/cas_cache/utils/utils_nvme.c | 583 ++++ modules/cas_cache/utils/utils_nvme.h | 38 + modules/cas_cache/utils/utils_properties.c | 769 +++++ modules/cas_cache/utils/utils_properties.h | 153 + modules/cas_cache/utils/utils_rpool.c | 262 ++ modules/cas_cache/utils/utils_rpool.h | 28 + modules/cas_cache/volume/obj_blk.h | 53 + .../cas_cache/volume/vol_atomic_dev_bottom.c | 1199 +++++++ .../volume/vol_atomic_dev_bottom.c.orig | 1217 +++++++ .../cas_cache/volume/vol_atomic_dev_bottom.h | 31 + modules/cas_cache/volume/vol_blk_utils.c | 470 +++ 
modules/cas_cache/volume/vol_blk_utils.h | 148 + .../cas_cache/volume/vol_block_dev_bottom.c | 597 ++++ .../cas_cache/volume/vol_block_dev_bottom.h | 26 + modules/cas_cache/volume/vol_block_dev_top.c | 1013 ++++++ modules/cas_cache/volume/vol_block_dev_top.h | 17 + .../volume/vol_block_dev_top.o.ur-safe | 2 + modules/cas_disk/Makefile | 12 + modules/cas_disk/cas_disk.h | 253 ++ modules/cas_disk/cas_disk_defs.h | 93 + modules/cas_disk/debug.h | 45 + modules/cas_disk/disk.c | 452 +++ modules/cas_disk/disk.h | 96 + modules/cas_disk/exp_obj.c | 842 +++++ modules/cas_disk/exp_obj.h | 59 + modules/cas_disk/exp_obj.o.ur-safe | 2 + modules/cas_disk/main.c | 165 + modules/cas_disk/sysfs.c | 35 + modules/cas_disk/sysfs.h | 21 + modules/config.mk | 94 + modules/extra.mk | 23 + modules/include/cas_ioctl_codes.h | 572 ++++ modules/include/cas_version.h | 30 + modules/tags | 1422 ++++++++ ocf | 1 + utils/60-persistent-storage-cas-load.rules | 11 + utils/60-persistent-storage-cas.rules | 38 + utils/Makefile | 71 + utils/casadm.8 | 565 ++++ utils/casctl | 142 + utils/casctl.8 | 63 + utils/ext3-config.csv | 24 + utils/ext4-config.csv | 25 + utils/ioclass-config.csv | 15 + utils/open-cas-loader | 56 + utils/open-cas-mount-utility | 26 + utils/open-cas-shutdown | 57 + utils/open-cas-shutdown.service | 14 + utils/open-cas.shutdown | 10 + utils/opencas.conf | 29 + utils/opencas.conf.5 | 61 + utils/opencas.py | 678 ++++ 140 files changed, 37144 insertions(+) create mode 100644 .gitmodules create mode 100644 Makefile create mode 100644 casadm/Makefile create mode 100644 casadm/argp.c create mode 100644 casadm/argp.h create mode 100644 casadm/cas_lib.c create mode 100644 casadm/cas_lib.h create mode 100644 casadm/cas_lib_utils.c create mode 100644 casadm/cas_lib_utils.h create mode 100644 casadm/cas_main.c create mode 100644 casadm/csvparse.c create mode 100644 casadm/csvparse.h create mode 100644 casadm/extended_err_msg.c create mode 100644 casadm/extended_err_msg.h create mode 100644 
casadm/intvector.c create mode 100644 casadm/intvector.h create mode 100644 casadm/ocf_env.h create mode 100644 casadm/ocf_env_headers.h create mode 100644 casadm/psort.c create mode 100644 casadm/psort.h create mode 100644 casadm/safeclib/ignore_handler_s.c create mode 100644 casadm/safeclib/mem_primitives_lib.c create mode 100644 casadm/safeclib/mem_primitives_lib.h create mode 100644 casadm/safeclib/memcpy_s.c create mode 100644 casadm/safeclib/memmove_s.c create mode 100644 casadm/safeclib/memset_s.c create mode 100644 casadm/safeclib/safe_lib.h create mode 100644 casadm/safeclib/safe_lib_errno.h create mode 100644 casadm/safeclib/safe_mem_constraint.c create mode 100644 casadm/safeclib/safe_mem_constraint.h create mode 100644 casadm/safeclib/safe_mem_lib.h create mode 100644 casadm/safeclib/safe_str_constraint.c create mode 100644 casadm/safeclib/safe_str_constraint.h create mode 100644 casadm/safeclib/safe_str_lib.h create mode 100644 casadm/safeclib/safe_types.h create mode 100644 casadm/safeclib/safeclib_private.h create mode 100644 casadm/safeclib/strncpy_s.c create mode 100644 casadm/safeclib/strnlen_s.c create mode 100644 casadm/safeclib/strtok_s.c create mode 100644 casadm/statistics_model.c create mode 100644 casadm/statistics_view.c create mode 100644 casadm/statistics_view.h create mode 100644 casadm/statistics_view_csv.c create mode 100644 casadm/statistics_view_csv.h create mode 100644 casadm/statistics_view_raw_csv.c create mode 100644 casadm/statistics_view_raw_csv.h create mode 100644 casadm/statistics_view_structs.h create mode 100644 casadm/statistics_view_text.c create mode 100644 casadm/statistics_view_text.h create mode 100644 casadm/table.c create mode 100644 casadm/table.h create mode 100644 casadm/upgrade.c create mode 100644 casadm/upgrade.h create mode 100644 casadm/vt100codes.h create mode 100755 modules/CAS_VERSION_GEN create mode 100644 modules/Makefile create mode 100644 modules/README create mode 100644 
modules/cas_cache/.gitignore create mode 100644 modules/cas_cache/Makefile create mode 100644 modules/cas_cache/cas_cache.h create mode 100644 modules/cas_cache/classifier.c create mode 100644 modules/cas_cache/classifier.h create mode 100644 modules/cas_cache/classifier_defs.h create mode 100644 modules/cas_cache/context.c create mode 100644 modules/cas_cache/context.h create mode 100644 modules/cas_cache/control.c create mode 100644 modules/cas_cache/control.h create mode 100644 modules/cas_cache/layer_cache_management.c create mode 100644 modules/cas_cache/layer_cache_management.c.orig create mode 100644 modules/cas_cache/layer_cache_management.h create mode 100644 modules/cas_cache/layer_upgrade.c create mode 100644 modules/cas_cache/layer_upgrade.h create mode 100644 modules/cas_cache/linux_kernel_version.h create mode 100644 modules/cas_cache/main.c create mode 100644 modules/cas_cache/ocf_env.c create mode 100644 modules/cas_cache/ocf_env.h create mode 100644 modules/cas_cache/ocf_env_headers.h create mode 100644 modules/cas_cache/service_ui_ioctl.c create mode 100644 modules/cas_cache/service_ui_ioctl.h create mode 100644 modules/cas_cache/threads.c create mode 100644 modules/cas_cache/threads.h create mode 100644 modules/cas_cache/utils/cas_cache_utils.h create mode 100644 modules/cas_cache/utils/utils_blk.c create mode 100644 modules/cas_cache/utils/utils_blk.h create mode 100644 modules/cas_cache/utils/utils_data.c create mode 100644 modules/cas_cache/utils/utils_data.h create mode 100644 modules/cas_cache/utils/utils_gc.c create mode 100644 modules/cas_cache/utils/utils_gc.h create mode 100644 modules/cas_cache/utils/utils_nvme.c create mode 100644 modules/cas_cache/utils/utils_nvme.h create mode 100644 modules/cas_cache/utils/utils_properties.c create mode 100644 modules/cas_cache/utils/utils_properties.h create mode 100644 modules/cas_cache/utils/utils_rpool.c create mode 100644 modules/cas_cache/utils/utils_rpool.h create mode 100644 
modules/cas_cache/volume/obj_blk.h create mode 100644 modules/cas_cache/volume/vol_atomic_dev_bottom.c create mode 100644 modules/cas_cache/volume/vol_atomic_dev_bottom.c.orig create mode 100644 modules/cas_cache/volume/vol_atomic_dev_bottom.h create mode 100644 modules/cas_cache/volume/vol_blk_utils.c create mode 100644 modules/cas_cache/volume/vol_blk_utils.h create mode 100644 modules/cas_cache/volume/vol_block_dev_bottom.c create mode 100644 modules/cas_cache/volume/vol_block_dev_bottom.h create mode 100644 modules/cas_cache/volume/vol_block_dev_top.c create mode 100644 modules/cas_cache/volume/vol_block_dev_top.h create mode 100644 modules/cas_cache/volume/vol_block_dev_top.o.ur-safe create mode 100644 modules/cas_disk/Makefile create mode 100644 modules/cas_disk/cas_disk.h create mode 100644 modules/cas_disk/cas_disk_defs.h create mode 100644 modules/cas_disk/debug.h create mode 100644 modules/cas_disk/disk.c create mode 100644 modules/cas_disk/disk.h create mode 100644 modules/cas_disk/exp_obj.c create mode 100644 modules/cas_disk/exp_obj.h create mode 100644 modules/cas_disk/exp_obj.o.ur-safe create mode 100644 modules/cas_disk/main.c create mode 100644 modules/cas_disk/sysfs.c create mode 100644 modules/cas_disk/sysfs.h create mode 100644 modules/config.mk create mode 100644 modules/extra.mk create mode 100644 modules/include/cas_ioctl_codes.h create mode 100644 modules/include/cas_version.h create mode 100644 modules/tags create mode 160000 ocf create mode 100644 utils/60-persistent-storage-cas-load.rules create mode 100644 utils/60-persistent-storage-cas.rules create mode 100644 utils/Makefile create mode 100644 utils/casadm.8 create mode 100755 utils/casctl create mode 100644 utils/casctl.8 create mode 100644 utils/ext3-config.csv create mode 100644 utils/ext4-config.csv create mode 100644 utils/ioclass-config.csv create mode 100755 utils/open-cas-loader create mode 100755 utils/open-cas-mount-utility create mode 100644 utils/open-cas-shutdown create 
mode 100644 utils/open-cas-shutdown.service create mode 100755 utils/open-cas.shutdown create mode 100644 utils/opencas.conf create mode 100644 utils/opencas.conf.5 create mode 100644 utils/opencas.py diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..4d792c1d2 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "ocf"] + path = ocf + url = git@github.com:Open-CAS/ocf.git diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..6f9a75687 --- /dev/null +++ b/Makefile @@ -0,0 +1,15 @@ +# +# Copyright(c) 2012-2019 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# + +default: all + +DIRS:=modules casadm utils + +.PHONY: default all clean distclean $(DIRS) + +all $(MAKECMDGOALS): $(DIRS) + +$(DIRS): + cd $@ && $(MAKE) $(MAKECMDGOALS) diff --git a/casadm/Makefile b/casadm/Makefile new file mode 100644 index 000000000..022f7a065 --- /dev/null +++ b/casadm/Makefile @@ -0,0 +1,167 @@ +# +# Copyright(c) 2012-2019 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# + +PWD:=$(shell pwd) +MODULESDIR:=$(PWD)/../modules +UTILS_DIR:=$(PWD)/../utils +BINARY_PATH = /sbin + +VERSION_FILE := $(MODULESDIR)/CAS_VERSION + +# +# Section below enables creating build with experimental features +# +ifeq ($(CAS_EXT_EXP),1) +DEFINES = WI_AVAILABLE +endif + +# +# Add defines for version +# +-include $(VERSION_FILE) +DEFINES += CAS_VERSION_MAIN=$(CAS_VERSION_MAIN) +DEFINES += CAS_VERSION_MAJOR=$(CAS_VERSION_MAJOR) +DEFINES += CAS_VERSION_MINOR=$(CAS_VERSION_MINOR) +DEFINES += CAS_BUILD_NO=\"$(CAS_BUILD_NO)\" + +# +# Additional git version +# +ifneq ($(strip $(CAS_BUILD_FLAG)),) +DEFINES += CAS_BUILD_FLAG=\"$(CAS_BUILD_FLAG)\" +endif + +# +# Include directories +# +INCLUDES = . 
+INCLUDES += $(MODULESDIR)/include + +OBJDIR = .obj/ +TARGET = casadm +TARGETS = $(TARGET) + +# +# Source to be complied +# + +OBJS = cas_lib.o +OBJS += cas_main.o +OBJS += argp.o +OBJS += statistics_view_csv.o +OBJS += cas_lib_utils.o +OBJS += statistics_model.o +OBJS += table.o +OBJS += psort.o +OBJS += statistics_view_text.o +OBJS += intvector.o +OBJS += statistics_view.o +OBJS += statistics_view_raw_csv.o +OBJS += csvparse.o +OBJS += extended_err_msg.o +OBJS += upgrade.o +OBJS += safeclib/memmove_s.o +OBJS += safeclib/memcpy_s.o +OBJS += safeclib/memset_s.o +OBJS += safeclib/strncpy_s.o +OBJS += safeclib/strtok_s.o +OBJS += safeclib/safe_str_constraint.o +OBJS += safeclib/ignore_handler_s.o +OBJS += safeclib/safe_mem_constraint.o +OBJS += safeclib/mem_primitives_lib.o +OBJS += safeclib/strnlen_s.o + +# +# Flags for C compilation +# +CFLAGS = $(patsubst %,-I%,$(INCLUDES)) +CFLAGS += $(patsubst %,-D%,$(DEFINES)) +ifdef DEBUG +CFLAGS += -O0 -g +else +CFLAGS += -O2 -D_FORTIFY_SOURCE=2 +endif +CFLAGS += -Wall -z relro -z now -fstack-protector -fPIC -Wformat -Wformat-security -fno-strict-aliasing + +# +# Flags for linking +# +LDFLAGS = -z noexecstack -z relro -z now -pie -pthread +# +# Targets +# + +all: sync + $(MAKE) build + +build: $(VERSION_FILE) $(TARGETS) + +sync: + @cd $(MODULESDIR) && $(MAKE) sync + +# +# Include dependencies file +# +$(TARGET): $(TARGET).a + @echo " LD " $@ + @$(CC) $(CFLAGS) $(LDFLAGS) -o $(TARGET) $< + +$(TARGET).a: $(patsubst %,$(OBJDIR)%,$(OBJS)) + @echo " AR " $@ + @ar rcs $@ $^ + @echo " AR " libcas.a + @cp -f $@ libcas.a + @ar d libcas.a $(OBJDIR)argp.o $(OBJDIR)cas_main.c + +# +# Generic target for C file +# +$(OBJDIR)%.o: %.c + @echo " CC " $< + @mkdir -p $(dir $@) +ifeq ($(strip $(CAS_VERSION_MAIN)),) + $(error "No version file") +endif + @$(CC) -c $(CFLAGS) -MMD -MP -MF"$(@:%.o=%.d)" -MT"$(@:%.o=%.d)" -o "$@" "$<" + +$(VERSION_FILE): + @echo " VERSION " $@ + @cd $(MODULESDIR) && ./CAS_VERSION_GEN + +clean: + @echo " CLEAN " + @rm 
-f *.a $(TARGETS) + @rm -f $(shell find -name \*.d) $(shell find -name \*.o) + +distclean: clean + @rm -f $(VERSION_FILE) + +install: + @echo "Installing casadm" + @install -m 755 $(TARGET) $(BINARY_PATH)/$(TARGET) + @install -m 644 $(UTILS_DIR)/$(TARGET).8 /usr/share/man/man8/$(TARGET).8 + + @install -m 755 -d /etc/opencas + @install -m 644 $(UTILS_DIR)/opencas.conf /etc/opencas/opencas.conf + @install -m 444 $(UTILS_DIR)/ioclass-config.csv /etc/opencas/ioclass-config.csv + @install -m 444 $(UTILS_DIR)/ext3-config.csv /etc/opencas/ext3-config.csv + @install -m 444 $(UTILS_DIR)/ext4-config.csv /etc/opencas/ext4-config.csv + + @install -m 644 $(UTILS_DIR)/opencas.conf.5 /usr/share/man/man5/opencas.conf.5 + +uninstall: + @echo "Uninstalling casadm" + @rm $(BINARY_PATH)/$(TARGET) + @rm /usr/share/man/man8/$(TARGET).8 + + @rm /etc/opencas/opencas.conf + @rm /etc/opencas/ioclass-config.csv + @rm /etc/opencas/ext3-config.csv + @rm /etc/opencas/ext4-config.csv + @rm -rf /etc/opencas + + @rm /usr/share/man/man5/opencas.conf.5 + +.PHONY: clean distclean all sync build install uninstall diff --git a/casadm/argp.c b/casadm/argp.c new file mode 100644 index 000000000..06a232867 --- /dev/null +++ b/casadm/argp.c @@ -0,0 +1,803 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include +#include +#include "argp.h" +#include "cas_lib.h" +#include "cas_lib_utils.h" +#include +#include + +#define PADDING " " +#define MAX_OPT_HELP_LEN 30 + +extern cas_printf_t cas_printf; + +static int is_su_requied(const cli_command* commands, int cmd) +{ + return commands[cmd].flags & CLI_SU_REQUIRED; +} + +static int is_command_hidden(const cli_command* commands, int cmd) +{ + return commands[cmd].flags & CLI_COMMAND_HIDDEN; +} + +static void print_short_usage(const app *app_values) +{ + cas_printf(LOG_INFO, "Usage: %s %s\n", app_values->name, app_values->info); +} + +static void print_info(const app *app_values) +{ + cas_printf(LOG_INFO, 
"Try `%s --help | -H' for more information.\n", app_values->name); +} + +char *get_short_name_string(const char short_name, char *buf) +{ + if (short_name) { + snprintf(buf, 3, "-%c", short_name); + } else { + buf[0] = 0; + } + return buf; +} + +char *command_name_with_slash(char *buf, size_t buf_size, char short_name, char *long_name) { + if (short_name) { + snprintf(buf, buf_size, "-%c/--%s", short_name, long_name); + } else { + snprintf(buf, buf_size, "--%s", long_name); + } + return buf; +} + +char *command_name_in_brackets(char *buf, size_t buf_size, char short_name, char *long_name) { + if (short_name) { + snprintf(buf, buf_size, "--%s (-%c)", long_name, short_name); + } else { + snprintf(buf, buf_size, "--%s", long_name); + } + return buf; +} + +void print_options_usage(cli_option* options, const char *separator, + int (*view)(cli_option* options, int flag), int flag) +{ + int print_separator = 0; + int i; + + if (NULL == options) { + return; + } + + for (i = 0; options[i].long_name != NULL; ++i) { + if (0 == view(&options[i], flag)) { + continue; + } + + if (print_separator) { + /* Separator */ + cas_printf(LOG_INFO, "%s", separator); + } + print_separator = 1; + + /* Long option name */ + cas_printf(LOG_INFO, "--%s", options[i].long_name); + + /* Parameter */ + if (options[i].arg != NULL) { + cas_printf(LOG_INFO, " <%s>", + options[i].arg); + } + } +} + +void print_command_header(const app *app_values, const cli_command *cmd) +{ + cas_printf(LOG_INFO, "%s%s\n\n", PADDING, + cmd->long_desc != NULL ? 
cmd->long_desc : cmd->desc); +} + +void print_list_options(cli_option* options, int flag, + int (*view)(cli_option* options, int flag)) +{ + char buffer[2048]; + + for (; options->long_name != NULL; options++) { + char *desc = options->desc; + char short_name[3]; + + if (0 == view(options, flag)) { + continue; + } + + if ((options->flags & CLI_OPTION_RANGE_INT) + || (options->flags & CLI_OPTION_DEFAULT_INT)) { + desc = buffer; + + if ((options->flags & CLI_OPTION_RANGE_INT) + && (options->flags + & CLI_OPTION_DEFAULT_INT)) { + snprintf(buffer, sizeof(buffer), options->desc, + options->min_value, + options->max_value, + options->default_value); + } else if (options->flags & CLI_OPTION_DEFAULT_INT) { + snprintf(buffer, sizeof(buffer), options->desc, + options->default_value); + } else if (options->flags & CLI_OPTION_RANGE_INT) { + snprintf(buffer, sizeof(buffer), options->desc, + options->min_value, + options->max_value); + } + } + + get_short_name_string(options->short_name, short_name); + if (options->arg != NULL) { + char buf[MAX_OPT_HELP_LEN]; + if (options->flags & CLI_OPTION_OPTIONAL_ARG) { + snprintf(buf, MAX_OPT_HELP_LEN, "--%s [<%s>]", + options->long_name, options->arg); + } else { + snprintf(buf, MAX_OPT_HELP_LEN, "--%s <%s>", + options->long_name, options->arg); + } + + cas_printf(LOG_INFO, "%s%-4s%-32s%s\n", PADDING, + short_name, buf, desc); + } else { + cas_printf(LOG_INFO, "%s%-4s--%-30s%s\n", PADDING, + short_name, options->long_name, desc); + } + } +} + +static void print_options_help(cli_option *options) +{ + char buffer[2048]; + int i; + + for (i = 0; options[i].long_name != NULL; ++i) { + char *desc = options[i].desc; + char short_name[3]; + if (options[i].flags & CLI_OPTION_HIDDEN) { + continue; + } + + if ((options[i].flags & CLI_OPTION_RANGE_INT) + || (options[i].flags & CLI_OPTION_DEFAULT_INT) ) { + desc = buffer; + + if ((options[i].flags & CLI_OPTION_RANGE_INT) + && (options[i].flags & CLI_OPTION_DEFAULT_INT) ) { + snprintf(buffer, 
sizeof(buffer), options[i].desc, + options[i].min_value, + options[i].max_value, + options[i].default_value); + } else if (options[i].flags & CLI_OPTION_DEFAULT_INT) { + snprintf(buffer, sizeof(buffer), options[i].desc, + options[i].default_value); + } else if (options[i].flags & CLI_OPTION_RANGE_INT) { + snprintf(buffer, sizeof(buffer), options[i].desc, + options[i].min_value, + options[i].max_value); + } + } + get_short_name_string(options[i].short_name, short_name); + if (options[i].arg != NULL) { + char buf[MAX_OPT_HELP_LEN]; + if (options[i].flags & CLI_OPTION_OPTIONAL_ARG) { + snprintf(buf, MAX_OPT_HELP_LEN, "--%s [<%s>]", + options[i].long_name, + options[i].arg); + } else { + snprintf(buf, MAX_OPT_HELP_LEN, "--%s <%s>", + options[i].long_name, + options[i].arg); + } + + cas_printf(LOG_INFO, "%s%-4s%-32s%s\n", PADDING, + short_name, buf, desc); + } else { + cas_printf(LOG_INFO, "%s%-4s--%-30s%s\n", PADDING, + short_name, options[i].long_name, + desc); + } + } +} + +static void print_namespace_help(app *app_values, cli_command *cmd) +{ + char command_name[MAX_STR_LEN]; + char option_name[MAX_STR_LEN]; + cli_namespace *ns = cmd->namespace; + int i; + + cas_printf(LOG_INFO, "Usage: %s --%s --%s \n\n", app_values->name, + cmd->name, ns->long_name); + + print_command_header(app_values, cmd); + + command_name_in_brackets(command_name, MAX_STR_LEN, cmd->short_name, cmd->name); + command_name_in_brackets(option_name, MAX_STR_LEN, ns->short_name, ns->long_name); + + + cas_printf(LOG_INFO, "Valid values of NAME are:\n"); + for (i = 0; ns->entries[i].name; ++i) + cas_printf(LOG_INFO, "%s%s - %s\n", PADDING, ns->entries[i].name, ns->entries[i].desc); + + cas_printf(LOG_INFO, "\n"); + + for (i = 0; ns->entries[i].name; ++i) { + cas_printf(LOG_INFO, "Options that are valid with %s %s %s are:\n", + command_name, option_name, ns->entries[i].name); + print_options_help(ns->entries[i].options); + if (ns->entries[i + 1].name) + cas_printf(LOG_INFO, "\n"); + } +} + +static void 
print_command_help(app *app_values, cli_command *cmd) +{ + int all_mandatory = 1; + int all_hidden = 1; + int i; + + if (cmd->help) { + (cmd->help)(app_values, cmd); + return; + } + + if (cmd->namespace) { + print_namespace_help(app_values, cmd); + return; + } + + cas_printf(LOG_INFO, "Usage: %s --%s", app_values->name, cmd->name); + + if (cmd->options != NULL) { + for (i = 0; cmd->options[i].long_name != NULL; ++i) { + if (cmd->options[i].flags & CLI_OPTION_HIDDEN) { + continue; + } + + all_hidden = 0; + + if (cmd->options[i].flags & CLI_OPTION_REQUIRED) { + cas_printf(LOG_INFO, " --%s", cmd->options[i].long_name); + if (cmd->options[i].arg != NULL) { + if (cmd->options[i].flags & CLI_OPTION_OPTIONAL_ARG) { + cas_printf(LOG_INFO, " [<%s>]", cmd->options[i].arg); + } else { + cas_printf(LOG_INFO, " <%s>", cmd->options[i].arg); + } + } + } else { + all_mandatory = 0; + } + } + + if (!all_mandatory) { + cas_printf(LOG_INFO, " [option...]"); + } + } + cas_printf(LOG_INFO, "\n\n"); + + print_command_header(app_values, cmd); + + if (cmd->options && !all_hidden) { + char option_name[MAX_STR_LEN]; + command_name_in_brackets(option_name, MAX_STR_LEN, cmd->short_name, cmd->name); + cas_printf(LOG_INFO, "Options that are valid with %s are:\n", option_name); + print_options_help(cmd->options); + } +} + +void print_help(const app *app_values, const cli_command *commands) +{ + int i; + + cas_printf(LOG_INFO, "%s\n\n", app_values->title); + print_short_usage(app_values); + cas_printf(LOG_INFO, "\nAvailable commands:\n"); + + for (i = 0;; ++i) { + char short_name[3]; + + if (commands[i].name == NULL) { + break; + } + + if (is_command_hidden(commands, i)) + continue; + + get_short_name_string(commands[i].short_name, short_name); + + cas_printf(LOG_INFO, "%s%-4s--%-25s%s\n", PADDING, short_name, + commands[i].name, commands[i].desc); + } + + cas_printf(LOG_INFO, "\nFor detailed help on the above commands use --help after the command.\n" + "e.g.\n%s%s --%s --help\n", + PADDING, 
app_values->name, commands[0].name); + + if (app_values->man != NULL) { + cas_printf(LOG_INFO, + "For more information, please refer to manual, Admin Guide (man %s)\n" + "or go to support page .\n", + app_values->man); + } else { + cas_printf(LOG_INFO, + "For more information, please refer to manual, Admin Guide\n" + "or go to support page .\n"); + } +} + +static int args_is_unrecognized(const char *cmd) +{ + if (strempty(cmd)) { + return 1; + } + + if ('-' == cmd[0]) { + char c = cmd[1]; + + /* Check if short option (command) is proper */ + if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')) { + if ('\0' == cmd[2]) { + return 0; + } else { + return 1; + } + } + + if ('-' == cmd[1]) { + char c = cmd[2]; + /* Long option (command), check if it is valid */ + + if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')) { + return 0; + } + } + } + + return 1; +} + +static int args_is(const char *in, const char *arg, const char c) +{ + if (strempty(in)) { + return 0; + } + + if ('-' == in[0]) { + if (0 != c && c == in[1]) { + if ('\0' == in[2]) { + return 1; + } + } + + if ('-' == in[1]) { + /* Long option */ + if (0 == strncmp(&(in[2]), arg, MAX_STR_LEN)) { + return 1; + } + } + } + + return 0; +} + +static int is_help(const char* cmd) +{ + return args_is(cmd, "help", 'H'); +} + +static int get_help_position(int argc, const char **argv) +{ + int i; + for (i = 2; i < argc; i++) { + if (is_help(argv[i])) { + return i; + } + } + + return -1; +} + +static int get_option(const cli_option *options, const char* opt) +{ + int i; + + for (i = 0; options[i].long_name; ++i) { + if (args_is(opt, options[i].long_name, options[i].short_name)) { + return i; + } + } + + return -1; +} + +/** + * log command as it was entered in CLI + */ +void log_command(int argc, const char **argv, int result, long long int timespan) +{ + const int def_cmd_buf_len = 100; + int cmd_buf_len = def_cmd_buf_len; + int cmd_len = 0; + int i = 0; + char *command = malloc(cmd_buf_len); + + if (!command) { + 
cas_printf(LOG_ERR, "Memory allocation failed for logging."); + return; + } + + for (i = 0 ; i != argc ; ++i) { + int tok_len = strnlen_s(argv[i], MAX_STR_LEN); + /* if reconstructed command width is longer than current + * cmd_buf_len (length of command buffer), than resize it + * to make it twice as big. + */ + if (tok_len + 1 + cmd_len > cmd_buf_len) { + cmd_buf_len = (tok_len + 1 + cmd_buf_len) * 2; + char *tmp = realloc(command, cmd_buf_len); + /* if reallocation failed, cancel logging */ + if (!tmp) { + cas_printf(LOG_ERR, + "Memory allocation failed for logging."); + free(command); + return; + } + command = tmp; + } + /* append additional token to a string */ + memcpy_s(command + cmd_len, cmd_buf_len - cmd_len, + argv[i], tok_len); + cmd_len += tok_len; + /* either a space or a null terminator */ + command[cmd_len] = (i == argc - 1) ? 0 : ' '; + cmd_len++; + } + + caslog(LOG_DEBUG, "Casadm invoked with: \"%s\". " + "Exit status is %d (%s). Command took %lld.%02lld s.", + command, result, result? "failure" : "success", + timespan / 1000, (timespan % 1000) / 10); + free(command); +} + +/** + * run command. Additionally log its execution and report any errors if + * they've happened + */ +int run_command(cli_command *commands, int cmd, int argc, const char **argv) +{ + int result; + const char *syslog_path = "/var/log/messages"; + /* time buffer and stat buffer after running command */ + struct timeb t0; + FILE *messages_f; + /* time buffer and stat buffer after running command */ + struct timeb t1; + long long int timespan; + + /* collect time */ + ftime(&t0); + /* collect stat buffer for syslog */ + messages_f = fopen(syslog_path, "r"); + if (messages_f) { + fseek(messages_f, 0, SEEK_END); + /* if opening file failed, don't stop command execution. 
+ * - just omit checking for /var/log/messages at the end + */ + } else { + /* ubuntu case*/ + syslog_path = "/var/log/syslog"; + messages_f = fopen(syslog_path, "r"); + if (messages_f) { + fseek(messages_f, 0, SEEK_END); + } + } + + /* execute CAS command */ + result = commands[cmd].handle(); + ftime(&t1); + timespan = (1000 * (t1.time - t0.time) + t1.millitm - t0.millitm); + + if (commands[cmd].short_name != 'V') { + log_command(argc, argv, result, timespan); + } + + /* print extra warning message IF command ended with failure and + * syslog contains something */ + if (FAILURE == result && messages_f) { + char line_buf[MAX_STR_LEN]; + bool kernel_said = false; /* set to true if CAS kernel module + * said anything during command + * execution */ + while (fgets(line_buf, MAX_STR_LEN - 1, messages_f)) { + line_buf[MAX_STR_LEN - 1] = 0; + if (strstr(line_buf, "CAS") && + strstr(line_buf, "kernel")) { + kernel_said = true; + } + } + + if (kernel_said) { + fprintf(stderr, "Error occurred, " + "please see syslog (%s) for details. 
\n", + syslog_path); + } + } + + if (messages_f) { + fclose(messages_f); + } + + return result; +} + +static int count_arg_params(const char **argv, int argc) +{ + int i = 0; + + for (i = 0; i < argc; i++) { + if ('-' == argv[i][0] && 0 != argv[i][1]) { + break; + } + } + + return i; +} + +void configure_cli_commands(cli_command *commands) +{ + cli_command *cmd = commands; + int ret; + + while(cmd->name) { + if (cmd->configure) { + ret = cmd->configure(cmd); + if (ret < 0) { + cmd->flags |= CLI_COMMAND_HIDDEN; + } + } + cmd++; + } +} + +int args_parse(app *app_values, cli_command *commands, int argc, const char **argv) +{ + int i, j, k, status = SUCCESS; + int args_count, args_offset; + const char **args_list = NULL; + const char* cmd_name = argv[1]; + cli_ns_entry *entry = NULL; + cli_option *options; + int cmd, first_opt; + + if (argc < 2) { + cas_printf(LOG_ERR, "No command given.\n"); + print_info(app_values); + return FAILURE; + } + + if (args_is_unrecognized(cmd_name)) { + cas_printf(LOG_ERR, "Unrecognized command %s\n", cmd_name); + print_info(app_values); + return FAILURE; + } + + for (i = 0;; ++i) { + if (commands[i].name == NULL) { + if (is_help(cmd_name)) { + print_help(app_values, commands); + return SUCCESS; + } else { + cas_printf(LOG_ERR, "Unrecognized command %s\n", + cmd_name); + print_info(app_values); + } + return FAILURE; + } else if (args_is(cmd_name, commands[i].name, commands[i].short_name)) { + cmd = i; + break; + } + } + + configure_cli_commands(commands); + + if (argc >= 3 && get_help_position(argc, argv) != -1) { + if (!is_command_hidden(commands, i)) { + print_command_help(app_values, &commands[i]); + } + return SUCCESS; + } + + if (is_su_requied(commands, cmd)) { + if (getuid() != 0) { + cas_printf(LOG_ERR, "Must be run as root.\n"); + return FAILURE; + } + } + + if (commands[cmd].options) { + options = commands[cmd].options; + first_opt = 2; + } else if (commands[cmd].namespace) { + if (argc < 3) { + cas_printf(LOG_ERR, "Missing 
namespace option.\n"); + print_info(app_values); + return FAILURE; + } + if (argc < 4) { + cas_printf(LOG_ERR, "Missing namespace name.\n"); + print_info(app_values); + return FAILURE; + } + if (!args_is(argv[2], commands[cmd].namespace->long_name, + commands[cmd].namespace->short_name)) { + cas_printf(LOG_ERR, "Unrecognized option.\n"); + print_info(app_values); + return FAILURE; + } + + entry = commands[cmd].namespace->entries; + while (true) { + if (!strcmp(argv[3], entry->name)) + break; + if (!(++entry)->name) { + cas_printf(LOG_ERR, "Unrecognized namespace entry.\n"); + print_info(app_values); + return FAILURE; + } + } + options = entry->options; + first_opt = 4; + } else { + return run_command(commands, cmd, argc, argv); + } + + /* for each possible option: + * - if it is required, check if it is supplied exactly once + * - if it is not required, check if it is supplied at most once + */ + for (i = 0; options[i].long_name; ++i) { + char option_name[MAX_STR_LEN]; + + /* count occurrences of an option (k as counter) */ + k = 0; + for (j = first_opt; j < argc; ++j) { + if (args_is(argv[j], options[i].long_name, + options[i].short_name)) { + k++; + } + } + + command_name_with_slash(option_name, MAX_STR_LEN, + options[i].short_name, options[i].long_name); + + if (options[i].flags & CLI_OPTION_REQUIRED) { + if (!k) { + cas_printf(LOG_ERR, "Missing required option %s\n", option_name); + print_info(app_values); + return FAILURE; + } + } + if (k > 1) { + cas_printf(LOG_ERR, "Option supplied more than once %s\n", option_name); + print_info(app_values); + return FAILURE; + } + } + + /* Store parameters for arguments. Terminate each list with NULL element. 
+ * Accomodate for max no of parameters */ + args_list = malloc(sizeof(*args_list) * (argc + 1)); + + if (args_list == NULL) { + return FAILURE; + } + + /* iterate over all arguments that were actually passed to the CLI */ + args_count = args_offset = 0; + for (i = first_opt; i < argc; ++i) { + int opt; + + if (args_is_unrecognized(argv[i])) { + cas_printf(LOG_ERR, "Invalid format %s\n", + argv[i]); + print_info(app_values); + + status = FAILURE; + goto free_args; + } + + opt = get_option(options, argv[i]); + if (opt == -1) { + cas_printf(LOG_ERR, "Unrecognized option %s\n", + argv[i]); + print_info(app_values); + + status = FAILURE; + goto free_args; + } + + if (options[opt].arg != NULL) { + + /* Count params for current argument. */ + args_count = count_arg_params(&argv[i + 1], argc - i - 1); + + if (args_count == 0 && !(options[opt].flags & CLI_OPTION_OPTIONAL_ARG)) { + + cas_printf(LOG_ERR, "Missing required argument in %s\n", + argv[i]); + print_info(app_values); + + status = FAILURE; + goto free_args; + } + + if (-1 != options[opt].args_count && + args_count != options[opt].args_count && + (0 != args_count || !(options[opt].flags & CLI_OPTION_OPTIONAL_ARG))) { + + cas_printf(LOG_ERR, "Invalid number of arguments for %s\n", + argv[i]); + print_info(app_values); + + status = FAILURE; + goto free_args; + } + + /* Add params for current argument. 
+ * Terminate list with NULL element.*/ + for (k = args_offset, j = 0; j < args_count; j++) { + args_list[k++] = argv[j + i + 1]; + } + args_list[args_offset + args_count] = NULL; + + i += args_count; + } + + if (commands[cmd].command_handle_opts) { + status = commands[cmd].command_handle_opts( + options[opt].long_name, + &args_list[args_offset]); + } else if (commands[cmd].namespace_handle_opts) { + status = commands[cmd].namespace_handle_opts( + entry->name, + options[opt].long_name, + &args_list[args_offset]); + } else { + cas_printf(LOG_ERR, "Internal error\n"); + status = FAILURE; + goto free_args; + } + args_offset += args_count; + + if (0 != status) { + cas_printf(LOG_ERR, "Error during options handling\n"); + print_info(app_values); + + status = FAILURE; + goto free_args; + } + } + + status = run_command(commands, cmd, argc, argv); + +free_args: + if (NULL != args_list) { + free(args_list); + args_list = NULL; + } + + return status; +} diff --git a/casadm/argp.h b/casadm/argp.h new file mode 100644 index 000000000..292693cf0 --- /dev/null +++ b/casadm/argp.h @@ -0,0 +1,133 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef _ARGP_H +#define _ARGP_H + +#include +#include +#include +#include + +enum CLI_OPTION_FLAGS { + CLI_OPTION_REQUIRED = 1 << 0, + CLI_OPTION_HIDDEN = 1 << 1, + CLI_OPTION_RANGE_INT = 1 << 2, /*! if option has a min/max value */ + CLI_OPTION_DEFAULT_INT = 1 << 3, /*! if option has a default value */ + CLI_OPTION_OPTIONAL_ARG = 1 << 4 /*! if option argument is optional */ +}; + +enum CLI_COMMAND_FLAGS { + CLI_SU_REQUIRED = 1 << 0, + CLI_COMMAND_HIDDEN = 1 << 1 +}; + +#define ERROR -1 +#define SUCCESS 0 +#define FAILURE 1 + +/** + * structure repsesenting each single option for CLI command (i.e. -i, -j for -R) + */ +typedef struct { + char short_name; /*!< short option name, one-letter. i.e. 
'i' representing -i + *!< as --cache-id */ + char* long_name; /*!< long option name (in above described case it would be + *!< "cache-id" */ + char* desc; /*!< description of an option (longer text... + *!< may contain single %d for default value and/or pair of %d marks + *!< for range of correct values. If it has both, default must come + *!< after the range, so be careful about wording such messages) */ + int args_count; /*!< number of arguments (0 - no arguments, -1 - unspecified) */ + char* arg; /*!< type of an argument, descriptive. i.e. "NUM", "NAME" */ + int flags; /*!< as per CLI_OPTION_FLAGS */ + int min_value; /*!< min parameter value. (optional) */ + int max_value; /*!< max parameter value. (optional) */ + int default_value; /*!< default parameter value. (optional) */ + int priv; /*!< Private filed for command handler */ +} cli_option; + +/* + * In namespace entries options array is nested in another flexible array + * (array of entries), so it cannot be flexible array itself. Because of that + * we make it static array of options with reasonable lenght. + */ +#define MAX_OPTIONS 32 + +typedef struct { + char* name; /*!< namespace entry name */ + char* desc; /*!< description of an namespace entry */ + cli_option options[MAX_OPTIONS]; + /*!< pointer to first element in null-terminated array of cli_option */ +} cli_ns_entry; + +typedef struct { + char short_name; /*!< short name of namespace */ + char* long_name; /*!< long name of namespace */ + cli_ns_entry entries[]; /*!< null-terminated array of namespace entries */ +} cli_namespace; + +typedef struct { + const char* name; + char* info; + char* title; + char* doc; + char* man; + int block; +} app; + +struct _cli_command; +typedef struct _cli_command cli_command; + +/** + * structure representing each CLI command, i.e. -S, -T... + */ +struct _cli_command { + char* name; /*!< name of command (i.e. "start-cache" for --start-cache) */ + + char short_name; /*!< short name of command (i.e. 
"S" for -S/--start-cache) */ + + char* desc; /*!< description that appears with "casadm -H" invocation */ + + char* long_desc; /*!< option descripotion that appears with "casadm -O -H invocation */ + + cli_option* options; /*!< pointer to first element in null-terminated array of cli_option */ + + int (*command_handle_opts)(char*, const char**); + /*! function pointer to function that processes options to command */ + + cli_namespace* namespace; + /*! namespace description */ + + int (*namespace_handle_opts)(char*, char*, const char**); + /*! function pointer to function that processes options to namespace */ + + int (*handle)(void); /*! function pointer to function that executes a command */ + + int flags; /*! command flags, as per CLI_COMMAND_FLAGS */ + + void (*help)(app *app_values, cli_command *cmd); + /*! Custom help provider */ + int (*configure)(cli_command *cmd); + /*! function pointer to function that configures command */ +}; + +char *command_name_in_brackets(char *buf, size_t buf_size, char short_name, char *long_name); + +void print_help(const app *app_values, const cli_command *commands); + +void print_options_usage(cli_option* options, const char *separator, + int (*view)(cli_option* options, int flag), int flag); + +void print_list_options(cli_option* options, int flag, + int (*view)(cli_option* options, int flag)); + +void print_command_header(const app *app_values, const cli_command *cmd); + +void configure_cli_commands(cli_command *commands); + +int args_parse(app *app_values, cli_command *commands, int argc, const char **argv); + +#endif diff --git a/casadm/cas_lib.c b/casadm/cas_lib.c new file mode 100644 index 000000000..3593e0cac --- /dev/null +++ b/casadm/cas_lib.c @@ -0,0 +1,2860 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include "cas_lib.h" +#include "extended_err_msg.h" +#include "cas_lib_utils.h" +#include "csvparse.h" +#include "statistics_view.h" +#include "safeclib/safe_mem_lib.h" +#include "safeclib/safe_str_lib.h" +#include "safeclib/safe_lib.h" +#include +#include +#include "psort.h" +#define PRINT_STAT(x) header->cmd_input.cache_stats.x + +#define CORE_ADD_MAX_TIMEOUT 30 + +#define CHECK_IF_CACHE_IS_MOUNTED -1 + +/** + * @brief Routine verifies if filesystem is currently mounted for given cache/core + * + * If FAILURE is returned, reason for failure is printed onto + * standard error. + * @param cache_id cache id of filesystem (to verify if it is mounted) + * @param core_id core id of filesystem (to verify if it is mounted); if this + * parameter is set to negative value, it is only checked if any core belonging + * to given cache is mounted; + * @return SUCCESS if is not mounted; FAILURE if filesystem is mounted + */ +int check_if_mounted(int cache_id, int core_id); + +/* KCAS_IOCTL_CACHE_CHECK_DEVICE wrapper */ +int _check_cache_device(const char *device_path, + struct kcas_cache_check_device *cmd_info); + +static const char *cache_states_name[ocf_cache_state_max + 1] = { + [ocf_cache_state_running] = "Running", + [ocf_cache_state_stopping] = "Stopping", + [ocf_cache_state_initializing] = "Initializing", + [ocf_cache_state_incomplete] = "Incomplete", + [ocf_cache_state_max] = "Unknown", +}; + +static const char *core_states_name[] = { + [ocf_core_state_active] = "Active", + [ocf_core_state_inactive] = "Inactive", +}; + +#define NOT_RUNNING_STATE "Not running" + +#define CACHE_STATE_LENGHT 20 + +#define CAS_LOG_FILE "/var/log/opencas.log" +#define CAS_LOG_LEVEL LOG_INFO + +int vcaslog(int log_level, const char *template, va_list args) +{ + FILE *log; + time_t t; + struct tm *tm; + char *timestamp; + int ret; + + if (log_level > CAS_LOG_LEVEL) + return 0; + + log = fopen(CAS_LOG_FILE, "a"); + if (!log) 
+ return FAILURE; + + ret = lockf(fileno(log), F_LOCK, 0); + if (ret < 0) + goto out; + + t = time(NULL); + tm = localtime(&t); + if (!tm) { + ret = FAILURE; + goto out; + } + + timestamp = asctime(tm); + if (!timestamp) { + ret = FAILURE; + goto out; + } + + timestamp[strnlen(timestamp, SIZE_MAX)-1] = 0; + + fseek(log, 0, SEEK_END); + fprintf(log, "%s casadm: ", timestamp); + vfprintf(log, template, args); + fflush(log); + + lockf(fileno(log), F_ULOCK, 0); + +out: + fclose(log); + return ret; +} + +__attribute__((format(printf, 2, 3))) +int caslog(int log_level, const char *template, ...) +{ + va_list args; + va_start(args, template); + vcaslog(log_level, template, args); + va_end(args); + return 0; +} + +__attribute__((format(printf, 2, 3))) +int std_printf(int log_level, const char *template, ...) +{ + va_list args; + va_start(args, template); + if (LOG_WARNING >= log_level) { + va_list args_copy; + va_copy(args_copy, args); + vfprintf(stderr, template, args); + vcaslog(log_level, template, args_copy); + va_end(args_copy); + } else { + vfprintf(stdout, template, args); + } + va_end(args); + return 0; +} + +cas_printf_t cas_printf = std_printf; + +int validate_dev(const char *dev_path) +{ + struct fstab *fstab_entry; + fstab_entry = getfsspec(dev_path); + if (fstab_entry != NULL) { + return FAILURE; + } + return SUCCESS; +} + +int validate_path(const char *path, int exist) +{ + if (NULL == path) { + return FAILURE; + } + + if (0 == path[0]) { + cas_printf(LOG_ERR, "Empty path\n"); + return FAILURE; + } + + if (strnlen(path, MAX_STR_LEN) >= MAX_STR_LEN) { + cas_printf(LOG_ERR, "File path too long\n"); + return FAILURE; + } + + if (exist) { + struct stat _stat = { 0 }; + int result = stat(path, &_stat); + if (result) { + cas_printf(LOG_ERR, "File does not exist\n"); + return FAILURE; + } + } + + return SUCCESS; +} + +int __validate_str_num(const char *source_str, const char *msg, + long long int min, long long int max, bool validate_sbd) +{ + uint64_t ret; + char 
*endptr = NULL; + + errno = 0; + ret = strtoul(source_str, &endptr, 10); + if (endptr == source_str || (endptr && *endptr != '\0') || + ((ret == 0 || ret == ULONG_MAX) && errno)) { + cas_printf(LOG_ERR, "Invalid %s, must be a correct unsigned decimal integer.\n", + msg); + return FAILURE; + } else if (ret < min || ret > max) { + cas_printf(LOG_ERR, "Invalid %s, must be in the range %lld-%lld.\n", + msg, min, max); + return FAILURE; + } else if (validate_sbd && __builtin_popcount(ret) != 1) { + cas_printf(LOG_ERR, "Invalid %s, must be a power of 2.\n", msg); + return FAILURE; + } + + return SUCCESS; +} + +int validate_str_num(const char *source_str, const char *msg, long long int min, long long int max) +{ + return __validate_str_num(source_str, msg, min, max, false); +} + +int validate_str_num_sbd(const char *source_str, const char *msg, int min, int max) +{ + return __validate_str_num(source_str, msg, min, max, true); +} + +int validate_str_unum(const char *source_str, const char *msg, unsigned int min, + unsigned int max) +{ + return __validate_str_num(source_str, msg, min, max, false); +} + +struct name_to_val_mapping { + const char* short_name; + const char* long_name; + int value; +}; + +static struct name_to_val_mapping eviction_policy_names[] = { + { .short_name = "lru", .value = ocf_eviction_lru }, + { NULL } +}; + +static struct name_to_val_mapping cache_mode_names[] = { + { .short_name = "wt", .long_name = "Write-Through", .value = ocf_cache_mode_wt }, + { .short_name = "wb", .long_name = "Write-Back", .value = ocf_cache_mode_wb }, + { .short_name = "wa", .long_name = "Write-Around", .value = ocf_cache_mode_wa }, + { .short_name = "pt", .long_name = "Pass-Through", .value = ocf_cache_mode_pt }, +#ifdef WI_AVAILABLE + { .short_name = "wi", .long_name = "Write-Invalidate", .value = ocf_cache_mode_wi }, +#endif + { NULL } +}; + +static struct name_to_val_mapping cleaning_policy_names[] = { + { .short_name = "nop", .value = ocf_cleaning_nop }, + { .short_name 
= "alru", .value = ocf_cleaning_alru }, + { .short_name = "acp", .value = ocf_cleaning_acp }, + { NULL } +}; + +static struct name_to_val_mapping metadata_mode_names[] = { + { .short_name = "normal", .value = CAS_METADATA_MODE_NORMAL }, + { .short_name = "atomic", .value = CAS_METADATA_MODE_ATOMIC }, + { NULL } +}; + +static struct name_to_val_mapping seq_cutoff_policy_names[] = { + { .short_name = "always", .value = ocf_seq_cutoff_policy_always }, + { .short_name = "full", .value = ocf_seq_cutoff_policy_full }, + { .short_name = "never", .value = ocf_seq_cutoff_policy_never }, + { NULL } +}; + +static struct name_to_val_mapping stats_filters_names[] = { + { .short_name = "conf", .value = STATS_FILTER_CONF }, + { .short_name = "usage", .value = STATS_FILTER_USAGE }, + { .short_name = "req", .value = STATS_FILTER_REQ }, + { .short_name = "blk", .value = STATS_FILTER_BLK }, + { .short_name = "err", .value = STATS_FILTER_ERR }, + { .short_name = "all", .value = STATS_FILTER_ALL }, + { NULL } +}; + +static struct name_to_val_mapping output_formats_names[] = { + { .short_name = "table", .value = OUTPUT_FORMAT_TABLE }, + { .short_name = "csv", .value = OUTPUT_FORMAT_CSV }, + { NULL } +}; + +static struct name_to_val_mapping metadata_modes_names[] = { + { .short_name = "normal", .value = METADATA_MODE_NORMAL }, + { .short_name = "atomic", .value = METADATA_MODE_ATOMIC }, + { NULL } +}; + +static int validate_str_val_mapping(const char* s, + const struct name_to_val_mapping* mappings, + int invalid_value) +{ + int i; + + if (strempty(s)) { + return invalid_value; + } + + for (i = 0; NULL != mappings[i].short_name; ++i) { + if (0 == strncmp(mappings[i].short_name, s, MAX_STR_LEN)) { + return mappings[i].value; + } + } + + return invalid_value; +} + +static int validate_str_val_mapping_multi(const char* s, + const struct name_to_val_mapping* mappings, + int invalid_value) +{ + const char* p; + char* token; + char* delim; + int value = 0; + int token_val; + + if (strempty(s)) 
{ + return invalid_value; + } + + p = s; + while (p[0]) { + delim = strchr(p, ','); + if (delim == p) { + /* Empty tokens not allowed */ + return invalid_value; + } + + if (delim) { + token = strndup(p, delim - p); + /* Skip token and comma */ + p = delim + 1; + if (!p[0]) { + /* Trailing comma not allowed */ + free(token); + return invalid_value; + } + } else { + size_t len = strnlen(p, MAX_STR_LEN); + if (len >= MAX_STR_LEN) { + return invalid_value; + } + + token = strdup(p); + p += len; + } + + token_val = validate_str_val_mapping(token, mappings, invalid_value); + if (token_val == invalid_value) { + free(token); + return invalid_value; + } + + value |= token_val; + free(token); + } + return value; +} + +static const char* val_to_long_name(int value, const struct name_to_val_mapping* mappings, + const char* other_name) +{ + int i; + for (i = 0; NULL != mappings[i].long_name; ++i) { + if (mappings[i].value == value) { + return mappings[i].long_name; + } + } + return other_name; +} + +static const char* val_to_short_name(int value, const struct name_to_val_mapping* mappings, + const char* other_name) +{ + int i; + for (i = 0; NULL != mappings[i].short_name; ++i) { + if (mappings[i].value == value) { + return mappings[i].short_name; + } + } + return other_name; +} + +/* Returns non-negative policy index or + * negative number in case of error. 
+ */
+inline int validate_str_ev_policy(const char *s)
+{
+	return validate_str_val_mapping(s, eviction_policy_names, -1);
+}
+
+/* Short display name for an eviction policy value. */
+inline const char *eviction_policy_to_name(uint8_t policy)
+{
+	return val_to_short_name(policy, eviction_policy_names, "Unknown");
+}
+
+/* Short display name for a cache mode value (e.g. "wt", "wb"). */
+inline const char *cache_mode_to_name(uint8_t cache_mode)
+{
+	return val_to_short_name(cache_mode, cache_mode_names, "Unknown mode");
+}
+
+/* Long display name for a cache mode value (e.g. "Write-Through"). */
+static inline const char *cache_mode_to_name_long(uint8_t cache_mode)
+{
+	return val_to_long_name(cache_mode, cache_mode_names, "??");
+}
+
+/* Parse a cache mode name; returns the mode value or -1 on error. */
+inline int validate_str_cache_mode(const char *s)
+{
+	return validate_str_val_mapping(s, cache_mode_names, -1);
+}
+
+/* Parse a cleaning policy name; returns the policy value or -1 on error. */
+inline int validate_str_cln_policy(const char *s)
+{
+	return validate_str_val_mapping(s, cleaning_policy_names, -1);
+}
+
+/* Short display name for a cleaning policy value. */
+inline const char *cleaning_policy_to_name(uint8_t policy)
+{
+	return val_to_short_name(policy, cleaning_policy_names, "Unknown");
+}
+
+/* Short display name for a metadata mode value ("normal"/"atomic"). */
+const char *metadata_mode_to_name(uint8_t metadata_mode)
+{
+	return val_to_short_name(metadata_mode, metadata_mode_names, "Invalid");
+}
+
+/* Short display name for a sequential cutoff policy value. */
+const char *seq_cutoff_policy_to_name(uint8_t seq_cutoff_policy)
+{
+	return val_to_short_name(seq_cutoff_policy,
+			seq_cutoff_policy_names, "Invalid");
+}
+
+/* Convert a byte count into a human-readable value/unit pair, dividing
+ * by 1024 until the value drops below 1024 (caps at TiB). */
+inline void metadata_memory_footprint(uint64_t size, float *footprint,
+		const char **units)
+{
+	float factor = 1;
+	static const char *units_names[] = {"B", "KiB", "MiB", "GiB", "TiB"};
+	uint32_t i;
+
+	for (i = 0; i < sizeof(units_names) / sizeof(units_names[0]); i++) {
+		*footprint = ((float) (size)) / factor;
+		*units = units_names[i];
+
+		if (*footprint < 1024.0) {
+			break;
+		}
+
+		factor *= 1024;
+	}
+}
+
+/* Returns one of or combination of STATS_FILTER values
+ * or STATS_FILTER_INVALID in case of error.
+ */
+int validate_str_stats_filters(const char* s)
+{
+	return validate_str_val_mapping_multi(s, stats_filters_names,
+			STATS_FILTER_INVALID);
+}
+
+/* Returns one of OUTPUT_FORMAT values
+ * or OUTPUT_FORMAT_INVALID in case of error.
+ */
+/* Resolves @disk (possibly a symlink such as a by-id path) to its
+ * canonical device node and copies it into @buf (capacity @num).
+ * Returns 0 on success, FAILURE when realpath() fails, or the
+ * strncpy_s() error code when the copy fails. */
+int get_dev_path(const char* disk, char* buf, size_t num)
+{
+	char *path;
+	int err;
+
+	path = realpath(disk, NULL);
+	if (!path)
+		return FAILURE;
+
+	err = strncpy_s(buf, num, path, MAX_STR_LEN);
+
+	free(path);
+	return err;
+}
+
+/* Fetch core info for (@cache_id, @core_id) via KCAS_IOCTL_CORE_INFO on
+ * the already-open control device @fd, then canonicalize the reported
+ * core path in place. Returns SUCCESS/FAILURE; on path-resolution
+ * failure only a warning is printed and SUCCESS is still returned. */
+int get_core_info(int fd, int cache_id, int core_id, struct kcas_core_info *info)
+{
+	memset(info, 0, sizeof(*info));
+	info->cache_id = cache_id;
+	info->core_id = core_id;
+
+	if (ioctl(fd, KCAS_IOCTL_CORE_INFO, info) < 0) {
+		return FAILURE;
+	}
+
+	/* internally use device special file path to describe core */
+	/* NOTE(review): source and destination here are the same buffer;
+	 * strncpy_s implementations may reject overlapping regions —
+	 * confirm the bundled safeclib permits this call. */
+	if (get_dev_path(info->core_path_name,
+			info->core_path_name,
+			sizeof(info->core_path_name))) {
+		cas_printf(LOG_WARNING, "WARNING: Can not resolve path to core "
+				"%d from cache %d. By-id path will be shown for that core.\n",
+				core_id, cache_id);
+	}
+
+	return SUCCESS;
+}
+
+/* Populate @core with id, path and kernel-reported info for the given
+ * (cache_id, core_id) pair. Opens and closes the control device itself.
+ * Returns SUCCESS/FAILURE. */
+static int get_core_device(int cache_id, int core_id, struct core_device *core)
+{
+	int fd;
+	struct kcas_core_info cmd_info;
+
+	if (!core)
+		return FAILURE;
+
+	fd = open_ctrl_device();
+	if (fd == -1)
+		return FAILURE;
+
+	if (get_core_info(fd, cache_id, core_id, &cmd_info)) {
+		cas_printf(LOG_ERR, "Error while retrieving stats\n");
+		print_err(cmd_info.ext_err_code);
+		close(fd);
+		return FAILURE;
+	}
+
+	close(fd);
+
+	core->id = core_id;
+	core->cache_id = cache_id;
+	strncpy_s(core->path, sizeof(core->path), cmd_info.core_path_name,
+			sizeof(cmd_info.core_path_name));
+	memcpy_s(&core->info, sizeof(core->info),
+			&cmd_info, sizeof(cmd_info));
+
+	return SUCCESS;
+}
+
+/* Number of running cache instances as reported by the kernel module;
+ * 0 on ioctl failure. */
+int get_cache_count(int fd)
+{
+	struct kcas_cache_count cmd;
+
+	if (ioctl(fd, KCAS_IOCTL_GET_CACHE_COUNT, &cmd) < 0)
+		return 0;
+
+	return cmd.cache_count;
+}
+
+/* Return a malloc'd array of cache ids (caller frees) and write its
+ * length to *caches_count; NULL when there are no caches or on error.
+ * The id list is fetched from the kernel in chunks of
+ * CACHE_LIST_ID_LIMIT via KCAS_IOCTL_LIST_CACHE.
+ * NOTE(review): *caches_count is NOT written when get_cache_count()
+ * reports no caches — presumably callers pre-initialize it; confirm. */
+int *get_cache_ids(int *caches_count)
+{
+	int i, fd, status;
+	struct kcas_cache_list cache_list;
+	int *cache_ids = NULL;
+	int count, chunk_size;
+
+	fd = open_ctrl_device();
+	if (fd == -1)
+		return NULL;
+
+	count = get_cache_count(fd);
+
+	if (count <= 0) {
+		goto error_out;
+	}
+
+	cache_ids = malloc(count * sizeof(*cache_ids));
+	if (cache_ids == NULL) {
+		goto error_out;
+	}
+
+	memset(&cache_list, 0, sizeof(cache_list));
+
+	*caches_count = 0;
+
+	chunk_size = CACHE_LIST_ID_LIMIT;
+	cache_list.id_position = 0;
+	cache_list.in_out_num = chunk_size;
+	do {
+		/* EINVAL is tolerated (treated as end of list); any other
+		 * errno aborts and discards the partial result. */
+		if ((status = ioctl(fd, KCAS_IOCTL_LIST_CACHE, &cache_list)) < 0) {
+			if (errno != EINVAL) {
+				cas_printf(LOG_ERR, "Error while retrieving cache properties %d %d\n",
+						errno, status);
+				free(cache_ids);
+				cache_ids = NULL;
+				*caches_count = 0;
+				break;
+			}
+		}
+
+		/* iterate through id table and get status */
+		for (i = 0; i < cache_list.in_out_num; i++) {
+			cache_ids[(*caches_count)] = cache_list.cache_id_tab[i];
+			(*caches_count)++;
+			if (*caches_count >= count) {
+				break;
+			}
+		}
+
+		cache_list.id_position += chunk_size;
+	} while (cache_list.in_out_num >= chunk_size); /* repeat until there is no more devices on the list */
+
+error_out:
+	close(fd);
+	return cache_ids;
+}
+
+/**
+ * @brief function returns pointer to cache device given cache_info structure.
+ *
+ * structure is mallocated, and therefore it is callers responsibility to free it.
+ * + * @return valid pointer to a structure or NULL if error happened + */ +struct cache_device *get_cache_device(const struct kcas_cache_info *info) +{ + int core_id, cache_id, ret; + struct cache_device *cache; + struct core_device core; + cache_id = info->cache_id; + size_t cache_size; + + cache_size = sizeof(*cache); + cache_size += info->info.core_count * sizeof(cache->cores[0]); + + cache = (struct cache_device *) malloc(cache_size); + if (NULL == cache) { + return NULL; + } + + cache->core_count = 0; + cache->expected_core_count = info->info.core_count; + cache->id = cache_id; + cache->state = info->info.state; + strncpy_s(cache->device, sizeof(cache->device), info->cache_path_name, + strnlen_s(info->cache_path_name, sizeof(info->cache_path_name))); + cache->mode = info->info.cache_mode; + cache->dirty = info->info.dirty; + cache->flushed = info->info.flushed; + cache->eviction_policy = info->info.eviction_policy; + cache->cleaning_policy = info->info.cleaning_policy; + cache->size = info->info.cache_line_size; + + if ((info->info.state & (1 << ocf_cache_state_running)) == 0) { + return cache; + } + + for (cache->core_count = 0; cache->core_count < info->info.core_count; ++cache->core_count) { + core_id = info->core_id[cache->core_count]; + + ret = get_core_device(cache_id, core_id, &core); + if (0 != ret) { + break; + } else { + memcpy_s(&cache->cores[cache->core_count], + sizeof(cache->cores[cache->core_count]), + &core, sizeof(core)); + } + } + + return cache; +} + +/** + * @brief function returns pointer to cache device given cache id and fd of /dev/cas_ctrl + * + * structure is mallocated, and therefore it is callers responsibility to free it. + * + * @param fd valid file descriptor to /dev/cas_ctrl + * @param cache_id cache id (1...) 
+ * @return valid pointer to a structure or NULL if error happened + */ +struct cache_device *get_cache_device_by_id_fd(int cache_id, int fd) +{ + struct kcas_cache_info cmd_info; + + memset(&cmd_info, 0, sizeof(cmd_info)); + cmd_info.cache_id = cache_id; + + if (ioctl(fd, KCAS_IOCTL_CACHE_INFO, &cmd_info) < 0) { + if (errno != EINVAL) + return NULL; + } + + return get_cache_device(&cmd_info); +} + +void free_cache_devices_list(struct cache_device **caches, int caches_count) +{ + int i; + for (i = 0; i < caches_count; ++i) { + free(caches[i]); + caches[i] = NULL; + } + free(caches); +} + +struct cache_device **get_cache_devices(int *caches_count) +{ + int i, fd, status, chunk_size, count; + struct kcas_cache_list cache_list; + struct cache_device **caches = NULL; + struct cache_device *tmp_cache; + + *caches_count = -1; + + fd = open_ctrl_device(); + if (fd == -1) + return NULL; + + *caches_count = count = get_cache_count(fd); + if (count <= 0) { + goto error_out; + } + + memset(&cache_list, 0, sizeof(cache_list)); + caches = malloc(count * sizeof(*caches)); + + if (NULL == caches) { + *caches_count = -1; + goto error_out; + } + + (*caches_count) = 0; + + chunk_size = CACHE_LIST_ID_LIMIT; + + cache_list.id_position = 0; + cache_list.in_out_num = chunk_size; + do { + if ((status = ioctl(fd, KCAS_IOCTL_LIST_CACHE, &cache_list)) < 0) { + if (errno != EINVAL) { + cas_printf(LOG_ERR, "Error while retrieving cache properties %d %d\n", + errno, status); + free_cache_devices_list(caches, *caches_count); + *caches_count = -1; + caches = NULL; + goto error_out; + } + } + + /* iterate through id table and get status */ + for (i = 0; i < cache_list.in_out_num; i++) { + if ((tmp_cache = get_cache_device_by_id_fd(cache_list.cache_id_tab[i], fd)) == NULL) { + cas_printf(LOG_ERR, "Failed to retrieve cache information!\n"); + continue; + } + caches[(*caches_count)] = tmp_cache; + (*caches_count)++; + if (*caches_count >= count) { + break; + } + } + cache_list.id_position += 
chunk_size; + } while (cache_list.in_out_num >= chunk_size); /* repeat until there is no more devices on the list */ + +error_out: + close(fd); + return caches; +} + +int caches_compare(const void *a, const void *b) +{ + int a_id = (*(struct cache_device**)a)->id; + int b_id = (*(struct cache_device**)b)->id; + return a_id - b_id; +} + +int check_cache_already_added(const char *cache_device) { + struct cache_device **caches, *curr_cache; + int caches_count, i; + + caches = get_cache_devices(&caches_count); + + if (NULL == caches) { + return SUCCESS; + } + + for (i = 0; i < caches_count; ++i) { + curr_cache = caches[i]; + if (0 == strncmp(cache_device, curr_cache->device, MAX_STR_LEN)) { + free_cache_devices_list(caches, caches_count); + return FAILURE; + } + } + + free_cache_devices_list(caches, caches_count); + + return SUCCESS; +} + +static void check_cache_scheduler(const char *cache_device, const char *elv_name) +{ + if (strnlen_s(elv_name, MAX_ELEVATOR_NAME) == 3 && + !strncmp(elv_name, "cfq", 3)) { + cas_printf(LOG_INFO, + "I/O scheduler for cache device %s is %s. 
This could cause performance drop.\n" + "Consider switching I/O scheduler to deadline or noop.\n", + cache_device, elv_name); + } +} + +int start_cache(ocf_cache_id_t cache_id, unsigned int cache_init, + const char *cache_device, ocf_cache_mode_t cache_mode, + ocf_eviction_t eviction_policy_type, + ocf_cache_line_size_t line_size, int force) +{ + int fd = 0; + struct kcas_start_cache cmd; + struct cache_device **caches; + struct cache_device *cache; + int i, status, caches_count; + double min_free_ram_gb; + + /* check if cache device given exists */ + fd = open(cache_device, 0); + if (fd < 0) { + cas_printf(LOG_ERR, "Device %s not found.\n", cache_device); + return FAILURE; + } + close(fd); + + if (cache_init == CACHE_INIT_NEW) + print_slow_atomic_cache_start_info(cache_device); + + fd = open_ctrl_device(); + if (fd == -1) + return FAILURE; + + if (cache_id == 0) { + cache_id = 1; + caches = get_cache_devices(&caches_count); + if (caches != NULL) { + psort(caches, caches_count, sizeof(struct cache_device*), caches_compare); + for (i = 0; i < caches_count; ++i) { + if (caches[i]->id == cache_id) { + cache_id += 1; + } + } + + free_cache_devices_list(caches, caches_count); + } + } + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cache_id = cache_id; + cmd.init_cache = cache_init; + strncpy_s(cmd.cache_path_name, + sizeof(cmd.cache_path_name), + cache_device, + strnlen_s(cache_device, + sizeof(cmd.cache_path_name))); + cmd.caching_mode = cache_mode; + cmd.eviction_policy = eviction_policy_type; + cmd.line_size = line_size; + cmd.force = (uint8_t)force; + + if (run_ioctl_interruptible(fd, KCAS_IOCTL_START_CACHE, &cmd, + "Starting cache", cache_id, OCF_CORE_ID_INVALID) < 0) { + close(fd); + if (cmd.ext_err_code == OCF_ERR_NO_FREE_RAM) { + min_free_ram_gb = cmd.min_free_ram; + min_free_ram_gb /= GiB; + + cas_printf(LOG_ERR, "Not enough free RAM.\n" + "You need at least %0.2gGB to start cache" + " with cache line size equal %llukB.\n", + min_free_ram_gb, line_size / KiB); + + if 
(64 * KiB > line_size) + cas_printf(LOG_ERR, "Try with greater cache line size.\n"); + + return FAILURE; + } else { + cas_printf(LOG_ERR, "Error inserting cache %d\n", cache_id); + if (OCF_ERR_NOT_OPEN_EXC == cmd.ext_err_code && + FAILURE == check_cache_already_added(cache_device)) { + cas_printf(LOG_ERR, "Cache device '%s' is already used as cache.\n", + cache_device); + } else { + print_err(cmd.ext_err_code); + } + return FAILURE; + } + } + + if (!cmd.metadata_mode_optimal) + cas_printf(LOG_NOTICE, "Selected metadata mode is not optimal for device %s.\n" + "You can improve cache performance by formating your device\n" + "to use optimal metadata mode with following command:\n" + "casadm --nvme --format atomic --device %s\n", + cache_device, cache_device); + + check_cache_scheduler(cache_device, + cmd.cache_elevator); + + status = SUCCESS; + + for (i = 0; i < CORE_ADD_MAX_TIMEOUT; ++i) { + cache = get_cache_device_by_id_fd(cache_id, fd); + status = FAILURE; + + if (cache == NULL) { + break; + } + + if (cache->core_count == cache->expected_core_count) { + if (cache->state & (1 << ocf_cache_state_incomplete)) { + cas_printf(LOG_WARNING, "WARNING: Cache is in incomplete state - at least one core is inactive\n"); + } + status = SUCCESS; + free(cache); + cache = NULL; + break; + } + + free(cache); + cache = NULL; + + sleep(1); + } + + close(fd); + + if (status == SUCCESS) { + cas_printf(LOG_INFO, "Successfully added cache instance %u\n", cache_id); + } else { + cas_printf(LOG_ERR, "Failed to start cache\n"); + return FAILURE; + } + + return SUCCESS; +} + +int stop_cache(ocf_cache_id_t cache_id, int flush) +{ + int fd = 0; + struct kcas_stop_cache cmd; + + /* don't even attempt ioctl if filesystem is mounted */ + if (check_if_mounted(cache_id, CHECK_IF_CACHE_IS_MOUNTED) == FAILURE) { + return FAILURE; + } + + fd = open_ctrl_device(); + if (fd == -1) + return FAILURE; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cache_id = cache_id; + cmd.flush_data = flush; + + 
if(run_ioctl_interruptible(fd, KCAS_IOCTL_STOP_CACHE, &cmd, "Stopping cache", + cache_id, OCF_CORE_ID_INVALID) < 0) { + close(fd); + if (OCF_ERR_FLUSHING_INTERRUPTED == cmd.ext_err_code) { + cas_printf(LOG_ERR, "You have interrupted stopping of cache. CAS continues\n" + "to operate normally. If you want to stop cache without fully\n" + "flushing dirty data, use '-n' option.\n"); + return INTERRUPTED; + } else { + cas_printf(LOG_ERR, "Error while removing cache %d\n", cache_id); + print_err(cmd.ext_err_code); + return FAILURE; + } + } + close(fd); + return SUCCESS; +} + +/* + * @brief check caching mode + * @param[in] ctrl_fd file descriptor of opened control utility + * @param[in] cache_id id of cache device + * @param[out] mode mode identifier as integer + * @return exit code of successful completion is 0; nonzero exit code means failure + */ +int get_cache_mode(int ctrl_fd, unsigned int cache_id, int *mode) +{ + struct kcas_cache_info cmd_info; + + memset(&cmd_info, 0, sizeof(cmd_info)); + cmd_info.cache_id = cache_id; + + if (ioctl(ctrl_fd, KCAS_IOCTL_CACHE_INFO, &cmd_info) < 0) + return FAILURE; + + *mode = cmd_info.info.cache_mode; + return SUCCESS; +} + +int set_cache_mode(unsigned int cache_mode, unsigned int cache_id, int flush) +{ + int fd = 0; + int orig_mode; + struct kcas_set_cache_state cmd; + + fd = open_ctrl_device(); + if (fd == -1) + return FAILURE; + + if (get_cache_mode(fd, cache_id, &orig_mode)) { + cas_printf(LOG_ERR, "Error while retrieving cache properties.\n"); + close(fd); + return FAILURE; + } + + /* if flushing mode is undefined, set it to default (but only if original mode is write back mode) */ + if (-1 == flush) { + if (ocf_cache_mode_wb == orig_mode) { + cas_printf(LOG_ERR, "Error: Required parameter (‘--flush-cache’) was not specified.\n"); + close(fd); + return FAILURE; + } else { + flush=NO; + } + } + + if (ocf_cache_mode_wb == orig_mode) { + if (1 == flush) { + cas_printf(LOG_INFO, "CAS is currently flushing dirty data to primary 
storage devices.\n"); + } else { + cas_printf(LOG_INFO, "CAS is currently migrating from Write-Back to %s mode.\n" + "Dirty data are being flushed to primary storage device in background.\n" + "Please find flushing progress via list caches command (‘casadm -L’) or\n" + "via statistics command (‘casadm -P’).\n", + cache_mode_to_name_long(cache_mode)); + } + } + memset(&cmd, 0, sizeof(cmd)); + cmd.cache_id = cache_id; + cmd.caching_mode = cache_mode; + cmd.flush_data = flush; + + if (run_ioctl_interruptible(fd, KCAS_IOCTL_SET_CACHE_STATE, &cmd, "Setting mode", + cache_id, OCF_CORE_ID_INVALID) < 0) { + close(fd); + if (OCF_ERR_FLUSHING_INTERRUPTED == cmd.ext_err_code) { + assert(flush); + cas_printf(LOG_ERR, + "Interrupted flushing of dirty data. Software prevented switching\n" + "of cache mode. If you want to switch cache mode immediately, use\n" + "'--flush-cache no' parameter.\n"); + return INTERRUPTED; + } else { + cas_printf(LOG_ERR, "Error while setting cache state for cache %d\n", + cache_id); + print_err(cmd.ext_err_code); + return FAILURE; + } + } + close(fd); + + return SUCCESS; +} + +static void print_param(FILE *intermediate_file, struct cas_param *param) +{ + if (param->value_names) { + fprintf(intermediate_file, "%s%s,%s\n", TAG(TABLE_ROW), + param->name, param->value_names[param->value]); + } else { + char *unit = param->unit ?: ""; + fprintf(intermediate_file, "%s%s,%u %s\n", TAG(TABLE_ROW), + param->name, param->value, unit); + } + fflush(intermediate_file); +} + +int core_params_set(unsigned int cache_id, unsigned int core_id, + struct cas_param *params) +{ + int cache_mode = ocf_cache_mode_none; + struct kcas_set_core_param cmd = {0}; + int fd = 0; + int i; + + fd = open_ctrl_device(); + if (fd == -1) + return FAILURE; + + if (get_cache_mode(fd, cache_id, &cache_mode)) { + close(fd); + return FAILURE; + } + + if (ocf_cache_mode_pt == cache_mode) { + cas_printf(LOG_WARNING, "Changing parameters for core in Pass-Through mode." 
+ "New values will be saved but will not be effective" + "until switching to another cache mode.\n"); + } + + for (i = 0; params[i].name; ++i) { + if (!params[i].select) + continue; + + cmd.cache_id = cache_id; + cmd.core_id = core_id; + cmd.param_id = i; + cmd.param_value = params[i].value; + + if (run_ioctl(fd, KCAS_IOCTL_SET_CORE_PARAM, &cmd) < 0) { + close(fd); + return FAILURE; + } + } + + close(fd); + return SUCCESS; +} + +int core_params_get(unsigned int cache_id, unsigned int core_id, + struct cas_param *params, unsigned int output_format) +{ + struct kcas_get_core_param cmd = {0}; + FILE *intermediate_file[2]; + int fd = 0; + int i; + + fd = open_ctrl_device(); + if (fd == -1) + return FAILURE; + + if (create_pipe_pair(intermediate_file)) { + cas_printf(LOG_ERR,"Failed to create unidirectional pipe.\n"); + close(fd); + return FAILURE; + } + + fprintf(intermediate_file[1], TAG(TABLE_HEADER) "Parameter name,Value\n"); + fflush(intermediate_file[1]); + + for (i = 0; params[i].name; ++i) { + if (!params[i].select) + continue; + + cmd.cache_id = cache_id; + cmd.core_id = core_id; + cmd.param_id = i; + + if (run_ioctl(fd, KCAS_IOCTL_GET_CORE_PARAM, &cmd) < 0) { + if (cmd.ext_err_code == OCF_ERR_CACHE_NOT_EXIST) + cas_printf(LOG_ERR, "Cache id %d not running\n", cache_id); + else if (cmd.ext_err_code == OCF_ERR_CORE_NOT_AVAIL) + cas_printf(LOG_ERR, "Core id %d not available\n", core_id); + else + cas_printf(LOG_ERR, "Can't get parameters\n"); + fclose(intermediate_file[0]); + fclose(intermediate_file[1]); + close(fd); + return FAILURE; + } + + if (params[i].transform_value) + params[i].value = params[i].transform_value(cmd.param_value); + else + params[i].value = cmd.param_value; + + print_param(intermediate_file[1], ¶ms[i]); + } + + close(fd); + + fclose(intermediate_file[1]); + stat_format_output(intermediate_file[0], stdout, output_format); + fclose(intermediate_file[0]); + + return SUCCESS; +} + +int cache_params_set(unsigned int cache_id, struct cas_param 
*params) +{ + int cache_mode = ocf_cache_mode_none; + struct kcas_set_cache_param cmd = {0}; + int fd = 0; + int i; + + fd = open_ctrl_device(); + if (fd == -1) + return FAILURE; + + if (get_cache_mode(fd, cache_id, &cache_mode)) { + close(fd); + return FAILURE; + } + + if (ocf_cache_mode_pt == cache_mode) { + cas_printf(LOG_WARNING, "Changing parameters for core in Pass-Through mode." + " New values will be saved but will not be effective" + " until switching to another cache mode.\n"); + } + + for (i = 0; params[i].name; ++i) { + if (!params[i].select) + continue; + + cmd.cache_id = cache_id; + cmd.param_id = i; + cmd.param_value = params[i].value; + + if (run_ioctl(fd, KCAS_IOCTL_SET_CACHE_PARAM, &cmd) < 0) { + close(fd); + return FAILURE; + } + } + + close(fd); + return SUCCESS; +} + +int cache_get_param(unsigned int cache_id, unsigned int param_id, + struct cas_param *param) +{ + struct kcas_get_cache_param cmd = { 0 }; + int fd = 0; + + if (param_id >= cache_param_id_max) + return FAILURE; + + fd = open_ctrl_device(); + if (fd == -1) + return FAILURE; + + cmd.param_id = param_id; + cmd.cache_id = cache_id; + + if (run_ioctl(fd, KCAS_IOCTL_GET_CACHE_PARAM, &cmd) < 0) { + if (cmd.ext_err_code == OCF_ERR_CACHE_NOT_EXIST) + cas_printf(LOG_ERR, "Cache id %d not running\n", cache_id); + else + cas_printf(LOG_ERR, "Can't get parameters\n"); + close(fd); + return FAILURE; + } + + if (param->transform_value) + param->value = param->transform_value(cmd.param_value); + else + param->value = cmd.param_value; + + close(fd); + + return SUCCESS; +} + +int cache_params_get(unsigned int cache_id, struct cas_param *params, + unsigned int output_format) +{ + struct kcas_get_cache_param cmd = {0}; + FILE *intermediate_file[2]; + int fd = 0; + int i; + + fd = open_ctrl_device(); + if (fd == -1) + return FAILURE; + + if (create_pipe_pair(intermediate_file)) { + cas_printf(LOG_ERR,"Failed to create unidirectional pipe.\n"); + close(fd); + return FAILURE; + } + + 
fprintf(intermediate_file[1], TAG(TABLE_HEADER) "Parameter name,Value\n"); + fflush(intermediate_file[1]); + + for (i = 0; params[i].name; ++i) { + if (!params[i].select) + continue; + + cmd.cache_id = cache_id; + cmd.param_id = i; + + if (run_ioctl(fd, KCAS_IOCTL_GET_CACHE_PARAM, &cmd) < 0) { + if (cmd.ext_err_code == OCF_ERR_CACHE_NOT_EXIST) + cas_printf(LOG_ERR, "Cache id %d not running\n", cache_id); + else + cas_printf(LOG_ERR, "Can't get parameters\n"); + fclose(intermediate_file[0]); + fclose(intermediate_file[1]); + close(fd); + return FAILURE; + } + + if (params[i].transform_value) + params[i].value = params[i].transform_value(cmd.param_value); + else + params[i].value = cmd.param_value; + + print_param(intermediate_file[1], &params[i]); + } + + close(fd); + + fclose(intermediate_file[1]); + stat_format_output(intermediate_file[0], stdout, output_format); + fclose(intermediate_file[0]); + + return SUCCESS; +} + +int check_core_already_cached(const char *core_device) { + struct cache_device **caches, *curr_cache; + struct core_device *curr_core; + int caches_count, i, j; + char core_device_path[MAX_STR_LEN]; + + if (get_dev_path(core_device, core_device_path, sizeof(core_device_path))) + return SUCCESS; + + caches = get_cache_devices(&caches_count); + + if (NULL == caches) { + return SUCCESS; + } + + for (i = 0; i < caches_count; ++i) { + curr_cache = caches[i]; + for (j = 0; j < curr_cache->core_count; ++j) { + curr_core = &curr_cache->cores[j]; + if (0 == + strncmp(core_device_path, curr_core->path, MAX_STR_LEN)) { + free_cache_devices_list(caches, caches_count); + return FAILURE; + } + } + } + + free_cache_devices_list(caches, caches_count); + + return SUCCESS; +} + +/** + * @brief convert string to int + * + * @param[in] start string beginning + * @param[out] end optional pointer to character past the end of integer + * @param[out] val integer value + * @return true in case of success, false in case of failure + */ +bool str_to_int(const char* start, char**
end, int *val) +{ + long int _val; + char *_end = (char *)start; + + _val = strtol(start, &_end, 10); + + if (_end == start) { + /* no integer found */ + return false; + } + + if (_val < INT_MIN || _val > INT_MAX) { + /* value out of int range */ + return false; + } + + /* 0 might indicate strtol error, so try to check if the + input is really 0. This might not be bullet-proof, but enough + for us. */ + if (_val == 0 && *(_end - 1) != '0') { + /* doesn't look like 0, more likely a parsing error */ + return false; + } + + *val = (int)_val; + if (end) + *end = _end; + + return true; +} + + +static bool get_core_cache_id_from_string(char *str, + int *cache_id, int *core_id) +{ + char *end; + + if (!str_to_int(str, &end, cache_id)) + return false; + + if (*end != '-') { + /* invalid separator */ + return false; + } + + if (!str_to_int(end + 1, NULL, core_id)) + return false; + + return true; +} + +int get_inactive_core_count(const struct kcas_cache_info *cache_info) +{ + struct cache_device *cache; + int inactive_cores = 0; + int i; + + cache = get_cache_device(cache_info); + if (!cache) + return -1; + + for (i = 0; i < cache->core_count; i++) { + if (cache->cores[i].info.state == ocf_core_state_inactive) + inactive_cores++; + } + + free(cache); + + return inactive_cores; +} + + +/** + * @brief check for illegal recursive core configuration + * + * Function returns 1 (FAILURE/true) if it detects that adding core_device to + * cache_id will result in illegal multilevel configuration. + * Function returns 0 (SUCCESS/false) if it detects that it is fine to add + * core_device to cache_id and it will NOT result in illegal multilevel + * configuration. 
+ * + * Here is example of such illegal configuration: + * + * type id disk device + * cache 1 /dev/sdc1 - + * +core 1 /dev/sdd1 /dev/cas1-1 + * +core 2 /dev/cas1-1 /dev/cas1-2 + * + * Here is another example of illegal configuration (notice that it is indirect, and hence + * whole multilevel caching hierarchy has to be parsed) + * + * type id disk device + * cache 1 /dev/sdc1 - + * +core 1 /dev/sdd1 /dev/cas1-1 + * +core 2 /dev/cas2-1 /dev/cas1-2 + * cache 2 /dev/sdc2 - + * +core 1 /dev/cas1-1 /dev/cas2-1 + * + * (in above example adding core 2 to cache shouldn't be allowed as this is effectively adding same + * disk device (/dev/sdd1) to the same cache (/dev/sdc1) twice). + * + * @param cache_id cache to which new core is being added + * @param core_device path to a core device that is being added + * @param fd valid file descriptor for /dev/cas_ctrl device + * @return 0 if check is successful and no illegal recursion is detected. + * 1 if illegal config detected. + */ +int illegal_recursive_core(unsigned int cache_id, const char *core_device, int core_path_size, int fd) +{ + char tmp_path[MAX_STR_LEN]; + char core_path[MAX_STR_LEN]; /* extracted actual path */ + int dev_core_id, dev_cache_id; /* cache_id and core_id for currently + * analyzed device */ + struct stat st_buf; + int i; + static const char cas_pattern[] = "/dev/cas"; + struct cache_device *cache; /*structure containing data on cache device*/ + + while (true) { + /* + * if core_device is a cas device (or a symlink to + * cas device) check if its cache device is cache id. if + * it is, return an error, as this will lead to illegal + * multilevel configuration.
+ */ + if (lstat(core_device, &st_buf)) { + cas_printf(LOG_ERR, "ERROR: lstat failed for %s.\n", + core_device); + return FAILURE; + } + + if (get_dev_path(core_device, core_path, sizeof(core_path))) + return FAILURE; + + /* if core_path does NOT begin with /dev/cas, report success + * as it certainly is not case of */ + if (strncmp(cas_pattern, core_path, sizeof(cas_pattern) - 1)) { + return SUCCESS; + } + + if (!get_core_cache_id_from_string( + core_path + sizeof(cas_pattern) - 1, + &dev_cache_id, + &dev_core_id)) { + cas_printf(LOG_ERR, "Failed to extract core/cache " + "id from %s path\n", core_path); + return FAILURE; + } + + if (dev_cache_id == cache_id) { + cas_printf(LOG_ERR, "Core device '%s' is already cached" + " on cache device %d. - " + "illegal multilevel caching configuration.\n", + core_device, cache_id); + return FAILURE; + } + /* possibly legal multilevel caching configuration - do one more + * iteration of this loop*/ + + /* get underlying core device of dev_cache_id-dev_core_id */ + cache = get_cache_device_by_id_fd(dev_cache_id, fd); + + if (!cache) { + cas_printf(LOG_ERR, "Failed to extract statistics for " + "cache device %d\n", dev_cache_id); + return FAILURE; + } + + /* lookup for record for appropriate core */ + for (i = 0; i != cache->core_count ; ++i) { + if (cache->cores[i].id == dev_core_id) { + strncpy_s(tmp_path, sizeof(tmp_path), + cache->cores[i].path, + strnlen_s(cache->cores[i].path, + sizeof(cache->cores[i].path))); + core_device = tmp_path; + break; + } + } + + /* make sure that loop above resulted in correct assignment */ + if (i == cache->core_count) { + cas_printf(LOG_ERR, "Failed to extract statistics for " + "core device %d-%d. 
Does it exist?\n", + dev_cache_id, dev_core_id); + free(cache); + return FAILURE; + } + + free(cache); + } +} + +/* Indicate whether given entry in /dev/disk/by-id should be ignored - + we ignore software created links like 'lvm-' since these can point to + both CAS exported object and core device depending on initialization order. +*/ +static bool dev_link_blacklisted(const char* entry) +{ + static const char* const prefix_blacklist[] = {"lvm"}; + static const unsigned count = ARRAY_SIZE(prefix_blacklist); + const char* curr; + unsigned i; + + for (i = 0; i < count; i++) { + curr = prefix_blacklist[i]; + if (!strncmp(entry, curr, strnlen_s(curr, MAX_STR_LEN))) + return true; + } + + return false; +} + +/* get device link starting with /dev/disk/by-id */ +static int get_dev_link(const char* disk, char* buf, size_t num) +{ + static const char dev_by_id_dir[] = "/dev/disk/by-id"; + int err; + struct dirent *entry; + DIR* dir; + char disk_dev[MAX_STR_LEN]; /* input disk device file */ + char dev_by_id[MAX_STR_LEN]; /* current device path by id */ + char curr_dev[MAX_STR_LEN]; /* current device file - compared against disk_dev[] */ + int n; + + dir = opendir(dev_by_id_dir); + if (!dir) { + /* no disk available by id? 
*/ + cas_printf(LOG_WARNING, "Unable to open disk alias directory.\n"); + return FAILURE; + } + + if (get_dev_path(disk, disk_dev, sizeof(disk_dev))) { + err = FAILURE; + goto close_dir; + } + + err = FAILURE; + while (err != SUCCESS && (entry = readdir(dir))) { + /* check if link is blacklisted */ + if (dev_link_blacklisted(entry->d_name)) + continue; + + /* construct device-by-id path for current device */ + n = snprintf(dev_by_id, sizeof(dev_by_id), "%s/%s", + dev_by_id_dir, entry->d_name); + if (n < 0 || n >= sizeof(dev_by_id)) { + cas_printf(LOG_WARNING, + "Error constructing disk device by-link path.\n"); + continue; + } + /* get device path for current device */ + if (get_dev_path(dev_by_id, curr_dev, sizeof(curr_dev))) { + /* it's normal to have stale links in /dev/ - no log */ + continue; + } + /* compare current device path against disk device path */ + if (!strncmp(disk_dev, curr_dev, sizeof(curr_dev))) { + if (n >= num) { + cas_printf(LOG_WARNING, "Buffer to short to store device link.\n"); + } else { + strncpy_s(buf, num, dev_by_id, sizeof(dev_by_id)); + err = SUCCESS; + } + } + } + +close_dir: + closedir(dir); + + return err; +} + +static int set_core_path(char *path, const char *core_device, size_t len) +{ + /* attempt to get disk device path by id */ + if (get_dev_link(core_device, path, len) == SUCCESS) + return SUCCESS; + + /* .. 
if this failed, try to get standard /dev/sd* path */ + if (get_dev_path(core_device, path, len) == SUCCESS) + return SUCCESS; + + /* if everything else failed - fall back to user-provided path */ + if (!strncpy_s(path, len, core_device, strnlen_s(core_device, MAX_STR_LEN))) + return SUCCESS; + + return FAILURE; +} + +int add_core(unsigned int cache_id, unsigned int core_id, const char *core_device, + int try_add, int update_path) +{ + int fd = 0, user_core_path_size; + struct kcas_insert_core cmd; + struct stat query_core; + const char *core_path; /* core path sent down to kernel */ + const char *user_core_path; /* core path provided by user */ + + /* Check if core device provided is valid */ + fd = open(core_device, 0); + if (fd < 0) { + cas_printf(LOG_ERR, "Device %s not found.\n", core_device); + return FAILURE; + } + close(fd); + + /* Check if the core device is a block device or a file */ + if (stat(core_device, &query_core)) { + cas_printf(LOG_ERR, "Could not stat target core device %s!\n", core_device); + return FAILURE; + } + + if (!S_ISBLK(query_core.st_mode)) { + cas_printf(LOG_ERR, "Core object %s is not supported!\n", core_device); + return FAILURE; + } + + memset(&cmd, 0, sizeof(cmd)); + if (set_core_path(cmd.core_path_name, core_device, MAX_STR_LEN) != SUCCESS) { + cas_printf(LOG_ERR, "Failed to copy core path\n"); + return FAILURE; + } + + user_core_path = core_device; + user_core_path_size = strnlen_s(core_device, MAX_STR_LEN); + core_path = cmd.core_path_name; + + fd = open_ctrl_device(); + if (fd == -1) + return FAILURE; + + /* check for illegal rec ursive caching config. 
*/ + if (illegal_recursive_core(cache_id, user_core_path, + user_core_path_size, fd)) { + close(fd); + return FAILURE; + } + + cmd.cache_id = cache_id; + cmd.core_id = core_id; + cmd.try_add = try_add; + cmd.update_path = update_path; + + if (ioctl(fd, KCAS_IOCTL_INSERT_CORE, &cmd) < 0) { + close(fd); + cas_printf(LOG_ERR, "Error while adding core device to cache instance %d\n", + cache_id); + if (OCF_ERR_NOT_OPEN_EXC == cmd.ext_err_code) { + if (FAILURE == check_core_already_cached(core_path)) { + cas_printf(LOG_ERR, "Core device '%s' is already cached.\n", + user_core_path); + } else { + cas_printf(LOG_ERR, "Failed to open '%s' device" + " exclusively. Please close all applications " + "accessing it or unmount the device.\n", + user_core_path); + } + } else { + print_err(cmd.ext_err_code); + } + return FAILURE; + } + close(fd); + + if (try_add) { + cas_printf(LOG_INFO, "Successfully added device in try add mode %s\n", user_core_path); + } else { + core_id = cmd.core_id; + + cas_printf(LOG_INFO, "Successfully added core %u to cache instance %u\n", core_id, cache_id); + } + + return SUCCESS; +} + +int check_if_mounted(int cache_id, int core_id) +{ + FILE *mtab; + struct mntent *mstruct; + char dev_buf[80]; + int dev_buf_len; + if (0 <= core_id) { + /* verify if specific core is mounted */ + snprintf(dev_buf, sizeof(dev_buf), "/dev/cas%d-%d", cache_id, core_id); + } else { + /* verify if any core from given cache is mounted */ + snprintf(dev_buf, sizeof(dev_buf), "/dev/cas%d-", cache_id); + } + dev_buf_len = strnlen(dev_buf, sizeof(dev_buf)); + + mtab = setmntent("/etc/mtab", "r"); + if (!mtab) + { + cas_printf(LOG_ERR, "Error while accessing /etc/mtab\n"); + return FAILURE; + } + + while ((mstruct = getmntent(mtab)) != NULL) { + /* mstruct->mnt_fsname is /dev/... 
block device path, not a mountpoint */ + if ((NULL != mstruct->mnt_fsname) + && (strncmp(mstruct->mnt_fsname, dev_buf, dev_buf_len) == 0)) { + if (core_id<0) { + cas_printf(LOG_ERR, + "Can't stop cache instance %d. Device %s is mounted!\n", + cache_id, mstruct->mnt_fsname); + } else { + cas_printf(LOG_ERR, + "Can't remove core %d from cache %d." + " Device %s is mounted!\n", + core_id, cache_id, mstruct->mnt_fsname); + } + return FAILURE; + } + } + return SUCCESS; + +} + +int remove_core(unsigned int cache_id, unsigned int core_id, + bool detach, bool force_no_flush) +{ + int fd = 0; + struct kcas_remove_core cmd; + + /* don't even attempt ioctl if filesystem is mounted */ + if (SUCCESS != check_if_mounted(cache_id, core_id)) { + return FAILURE; + } + + fd = open_ctrl_device(); + if (fd == -1) + return FAILURE; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cache_id = cache_id; + cmd.core_id = core_id; + cmd.force_no_flush = force_no_flush; + cmd.detach = detach; + + if (run_ioctl_interruptible(fd, KCAS_IOCTL_REMOVE_CORE, &cmd, + "Removing core", cache_id, core_id) < 0) { + close(fd); + if (OCF_ERR_FLUSHING_INTERRUPTED == cmd.ext_err_code) { + cas_printf(LOG_ERR, "You have interrupted removal of core. 
CAS continues to operate normally.\n"); + return INTERRUPTED; + } else { + cas_printf(LOG_ERR, "Error while removing core device %d from cache instance %d\n", + core_id, cache_id); + print_err(cmd.ext_err_code); + return FAILURE; + } + } + close(fd); + + return SUCCESS; +} + +int core_pool_remove(const char *core_device) +{ + struct kcas_core_pool_remove cmd; + int fd; + + fd = open_ctrl_device(); + if (fd == -1) + return FAILURE; + + if (set_core_path(cmd.core_path_name, core_device, MAX_STR_LEN) != SUCCESS) { + cas_printf(LOG_ERR, "Failed to copy core path\n"); + close(fd); + return FAILURE; + } + + if (ioctl(fd, KCAS_IOCTL_CORE_POOL_REMOVE, &cmd) < 0) { + cas_printf(LOG_ERR, "Error while removing device %s from core pool\n", + core_device); + print_err(cmd.ext_err_code); + close(fd); + return FAILURE; + } + + close(fd); + return SUCCESS; +} + +#define DIRTY_FLUSHING_WARNING "You have interrupted flushing of cache dirty data. CAS continues to operate\nnormally and dirty data that remains on cache device will be flushed by cleaning thread.\n" +int flush_cache(unsigned int cache_id) +{ + int fd = 0; + struct kcas_flush_cache cmd; + + fd = open_ctrl_device(); + if (fd == -1) + return FAILURE; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cache_id = cache_id; + /* synchronous flag */ + if (run_ioctl_interruptible(fd, KCAS_IOCTL_FLUSH_CACHE, &cmd, "Flushing cache", + cache_id, OCF_CORE_ID_INVALID) < 0) { + close(fd); + if (OCF_ERR_FLUSHING_INTERRUPTED == cmd.ext_err_code) { + cas_printf(LOG_ERR, DIRTY_FLUSHING_WARNING); + return INTERRUPTED; + } else { + print_err(cmd.ext_err_code); + return FAILURE; + } + } + + close(fd); + return SUCCESS; +} + +int flush_core(unsigned int cache_id, unsigned int core_id) +{ + int fd = 0; + struct kcas_flush_core cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cache_id = cache_id; + cmd.core_id = core_id; + + fd = open_ctrl_device(); + if (fd == -1) + return FAILURE; + + /* synchronous flag */ + if (run_ioctl_interruptible(fd, 
KCAS_IOCTL_FLUSH_CORE, &cmd, "Flushing core", cache_id, core_id) < 0) { + close(fd); + if (OCF_ERR_FLUSHING_INTERRUPTED == cmd.ext_err_code) { + cas_printf(LOG_ERR, DIRTY_FLUSHING_WARNING); + return INTERRUPTED; + } else { + print_err(cmd.ext_err_code); + return FAILURE; + } + } + close(fd); + return SUCCESS; +} + +struct partition_config_col { + const char *name; + int pos; +}; + +static struct partition_config_col partition_config_columns[] = { + { .name = "IO class id", .pos = -1 }, + { .name = "IO class name", .pos = -1 }, + { .name = "Eviction priority", .pos = -1 }, + { .name = "Allocation", .pos = -1 }, + { .name = NULL } +}; + +void partition_list_line(FILE *out, struct kcas_io_class *cls, bool csv) +{ + char buffer[128]; + const char *prio; + const char *allocation; + if (cls->info.cache_mode != ocf_cache_mode_pt) + allocation = csv ? "1" : "YES"; + else + allocation = csv ? "0" : "NO"; + + if (OCF_IO_CLASS_PRIO_PINNED == cls->info.priority) { + prio = csv ? "" : "Pinned"; + } else { + snprintf(buffer, sizeof(buffer), "%d", cls->info.priority); + prio = buffer; + } + + fprintf(out, TAG(TABLE_ROW)"%u,%s,%s,%s\n", + cls->class_id, cls->info.name, prio, allocation); + +} + +int partition_list(unsigned int cache_id, unsigned int output_format) +{ + struct kcas_io_class io_class = { .ext_err_code = 0 }; + int fd, i = 0, result = 0; + /* 1 is writing end, 0 is reading end of a pipe */ + FILE *intermediate_file[2]; + bool use_csv, first_col; + + fd = open_ctrl_device(); + if (fd == -1 ) + return FAILURE; + + if (create_pipe_pair(intermediate_file)) { + cas_printf(LOG_ERR,"Failed to create unidirectional pipe.\n"); + close(fd); + return FAILURE; + } + + use_csv = (output_format == OUTPUT_FORMAT_CSV); + + first_col = true; + fprintf(intermediate_file[1], TAG(TABLE_HEADER)); + for (i = 0; partition_config_columns[i].name; i++) { + if (!first_col) { + fputc(',', intermediate_file[1]); + } + fprintf(intermediate_file[1], "%s", + partition_config_columns[i].name); + 
first_col = false; + } + fputc('\n', intermediate_file[1]); + + for (i = 0; i < OCF_IO_CLASS_MAX; i++, io_class.ext_err_code = 0) { + io_class.cache_id = cache_id; + io_class.class_id = i; + + result = run_ioctl(fd, KCAS_IOCTL_PARTITION_STATS, &io_class); + if (result) { + if (OCF_ERR_IO_CLASS_NOT_EXIST == io_class.ext_err_code) { + result = SUCCESS; + continue; + } else { + result = FAILURE; + break; + } + } + + partition_list_line(intermediate_file[1], + &io_class, use_csv); + + } + + if (io_class.ext_err_code) { + print_err(io_class.ext_err_code); + } + + fclose(intermediate_file[1]); + if (!result && stat_format_output(intermediate_file[0], stdout, + use_csv?RAW_CSV:TEXT)) { + cas_printf(LOG_ERR, "An error occured during statistics formatting.\n"); + result = FAILURE; + } + fclose(intermediate_file[0]); + close(fd); + + return result; +} + +enum { + part_csv_coll_id = 0, part_csv_coll_name, part_csv_coll_prio, + part_csv_coll_alloc, part_csv_coll_max +}; + +int partition_is_name_valid(const char *name) +{ + int i; + int length = strnlen(name, OCF_IO_CLASS_NAME_MAX); + if (0 == length || length >= OCF_IO_CLASS_NAME_MAX) { + cas_printf(LOG_ERR, "Empty or too long IO class name\n"); + return FAILURE; + } + + for (i = 0; i < length; i++) { + if (name[i] == ',' || name[i] == '"' || + name[i] < 32 || name[i] > 126) { + cas_printf(LOG_ERR, "Only characters allowed in IO " + "class name are low ascii characters, " + "excluding control characters, comma and " + "quotation mark.\n"); + return FAILURE; + } + } + + return SUCCESS; +} + +static inline const char *partition_get_csv_col(CSVFILE *csv, int col, + int *error_col) +{ + const char *val; + + val = csv_get_col(csv, partition_config_columns[col].pos); + if (!val) { + *error_col = col; + } + return val; +} + +static inline int partition_get_line(CSVFILE *csv, + struct kcas_io_classes *cnfg, + int *error_col) +{ + uint32_t part_id; + uint32_t value; + const char *id, *name, *prio, *alloc; + + id = 
partition_get_csv_col(csv, part_csv_coll_id, error_col); + if (!id) { + return FAILURE; + } + name = partition_get_csv_col(csv, part_csv_coll_name, error_col); + if (!name) { + return FAILURE; + } + prio = partition_get_csv_col(csv, part_csv_coll_prio, error_col); + if (!prio) { + return FAILURE; + } + alloc = partition_get_csv_col(csv, part_csv_coll_alloc, error_col); + if (!alloc) { + return FAILURE; + } + + /* Validate ID */ + *error_col = part_csv_coll_id; + if (strempty(id)) { + return FAILURE; + } + if (validate_str_num(id, "id", 0, OCF_IO_CLASS_ID_MAX)) { + return FAILURE; + } + part_id = strtoul(id, NULL, 10); + if (part_id > OCF_IO_CLASS_ID_MAX) { + cas_printf(LOG_ERR, "Invalid partition id\n"); + return FAILURE; + } + if (!strempty(cnfg->info[part_id].name)) { + cas_printf(LOG_ERR, "Double configuration for IO class id %u\n", + part_id); + return FAILURE; + } + + /* Validate name */ + *error_col = part_csv_coll_name; + if (SUCCESS != partition_is_name_valid(name)) { + return FAILURE; + } + strncpy_s(cnfg->info[part_id].name, sizeof(cnfg->info[part_id].name), + name, strnlen_s(name, sizeof(cnfg->info[part_id].name))); + + /* Validate Priority*/ + *error_col = part_csv_coll_prio; + if (strempty(prio)) { + value = OCF_IO_CLASS_PRIO_PINNED; + } else { + if (validate_str_num(prio, "prio", OCF_IO_CLASS_PRIO_HIGHEST, + OCF_IO_CLASS_PRIO_LOWEST)) { + return FAILURE; + } + value = strtoul(prio, NULL, 10); + } + cnfg->info[part_id].priority = value; + + /* Validate Allocation */ + *error_col = part_csv_coll_alloc; + if (strempty(alloc)) { + return FAILURE; + } + if (validate_str_num(alloc, "alloc", 0, 1)) { + return FAILURE; + } + value = strtoul(alloc, NULL, 10); + if (0 == value) { + cnfg->info[part_id].cache_mode = ocf_cache_mode_pt; + } else if (1 == value) { + cnfg->info[part_id].cache_mode = ocf_cache_mode_max; + } else { + return FAILURE; + } + + cnfg->info[part_id].min_size = 0; + cnfg->info[part_id].max_size = UINT32_MAX; + + return 0; +} + +static int 
partition_parse_header(CSVFILE *csv) +{ + int i, j, csv_cols; + const char *col_name; + + csv_cols = csv_count_cols(csv); + for (i = 0; i < csv_cols; i++) { + col_name = csv_get_col(csv, i); + + if (!col_name) { + cas_printf(LOG_ERR, "Cannot parse configuration file.\n"); + return FAILURE; + } + + for (j = 0; partition_config_columns[j].name; j++) { + if (!strncmp(col_name, partition_config_columns[j].name, MAX_STR_LEN)) { + partition_config_columns[j].pos = i; + break; + } + } + if (!partition_config_columns[j].name) { + cas_printf(LOG_ERR, + "Cannot parse configuration file - unknown column \"%s\".\n", + col_name); + return FAILURE; + } + } + + for (i = 0; partition_config_columns[i].name; i++) { + if (partition_config_columns[i].pos < 0) { + cas_printf(LOG_ERR, + "Cannot parse configuration file - missing column \"%s\".\n", + partition_config_columns[i].name); + return FAILURE; + } + } + return SUCCESS; +} + +int partition_get_config(CSVFILE *csv, struct kcas_io_classes *cnfg, + int cache_id) +{ + int result = 0, count = 0; + int line = 1; + int error_col = -1; + + cnfg->cache_id = cache_id; + + /* before reading io class configuration check header */ + if (csv_read(csv)) { + if (csv_feof(csv)) { + cas_printf(LOG_ERR, + "Empty IO Classes configuration file" + " supplied.\n"); + return FAILURE; + } else { + cas_printf(LOG_ERR, + "I/O error occured while reading" + " IO Classes configuration file" + " supplied.\n"); + return FAILURE; + } + } + + if (partition_parse_header(csv)) { + cas_printf(LOG_ERR, "Failed to parse I/O classes" + " configuration file header. 
It is either" + " malformed or missing.\n" + "Please consult Admin Guide to check how" + " columns in configuration file should" + " be named.\n"); + return FAILURE; + } + + /* check all lines of input */ + while (!csv_feof(csv)) { + line++; + if (csv_read(csv)) { + if (csv_feof(csv)) { + break; + } else { + result = FAILURE; + break; + } + } + + if (part_csv_coll_max != csv_count_cols(csv)) { + if (csv_empty_line(csv)) { + continue; + } else { + result = FAILURE; + break; + } + } + + if (partition_get_line(csv, cnfg, &error_col)) { + result = FAILURE; + break; + } + + count++; + } + + if (result) { + if (error_col >= 0) { + cas_printf(LOG_ERR, + "Cannot parse configuration file - error in line %d in column %d (%s).\n", + line, + partition_config_columns[error_col].pos+1, + partition_config_columns[error_col].name); + } else { + cas_printf(LOG_ERR, "Cannot parse configuration file - error in line %d.\n", line); + } + } else if (0 == count) { + result = FAILURE; + cas_printf(LOG_ERR, "Empty configuration file\n"); + } + + return result; +} + +int partition_set_config(struct kcas_io_classes *cnfg) +{ + int fd; + int result = 0; + + fd = open_ctrl_device(); + if (fd == -1) + return FAILURE; + + result = run_ioctl(fd, KCAS_IOCTL_PARTITION_SET, cnfg); + if (result) { + if (OCF_ERR_IO_CLASS_NOT_EXIST == cnfg->ext_err_code) { + result = SUCCESS; + } else { + print_err(cnfg->ext_err_code); + result = FAILURE; + } + } + + close(fd); + return result; +} + +int partition_setup(unsigned int cache_id, const char *file) +{ + int result = 0; + CSVFILE *in; + struct kcas_io_classes *cnfg = calloc(1, KCAS_IO_CLASSES_SIZE); + + if (!cnfg) + return FAILURE; + + if (strempty(file)) { + cas_printf(LOG_ERR, "Invalid path of configuration file\n"); + result = FAILURE; + goto exit; + } + + if ('-'==file[0] && (!file[1])) { + /* configuration is supposed to be read from stdin. 
Setup + * a csv parser treating standard input as input file instead + * of opening a regular file */ + in = csv_fopen(stdin); + } else { + /* read ioclass configuration from a regular file */ + in = csv_open(file, "r"); + } + if (NULL == in) { + cas_printf(LOG_ERR, "Cannot open configuration file %s\n", + file); + result = FAILURE; + goto exit; + } + + if (0 == partition_get_config(in, cnfg, cache_id)) { + result = partition_set_config(cnfg); + } else { + result = FAILURE; + } + + if ('-' == file[0] && (!file[1])) { + /* free assets allocated by CSV parser without actually + * closing a file */ + csv_close_nu(in); + } else { + csv_close(in); + } + +exit: + free(cnfg); + return result; +} + +int reset_counters(unsigned int cache_id, unsigned int core_id) +{ + struct kcas_reset_stats cmd; + int fd = 0; + + fd = open_ctrl_device(); + if (fd == -1) + return FAILURE; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cache_id = cache_id; + cmd.core_id = core_id; + + if (ioctl(fd, KCAS_IOCTL_RESET_STATS, &cmd) < 0) { + close(fd); + cas_printf(LOG_ERR, "Error encountered while reseting counters\n"); + print_err(cmd.ext_err_code); + return FAILURE; + } + + close(fd); + return SUCCESS; +} + +int cas_module_version(char *buff, int size) +{ + FILE *fd; + int n_read; + + if (size <= 0 || size > MAX_STR_LEN) { + return FAILURE; + } + memset(buff, 0, size); + + fd = fopen("/sys/module/cas_cache/version", "r"); + if (!fd) { + return FAILURE; + } + + n_read = fread(buff, 1, size, fd); + if (ferror(fd)) { + n_read = 0; + } + fclose(fd); + + if (n_read > 0) { + buff[n_read - 1] = '\0'; + return SUCCESS; + } else { + return FAILURE; + } +} + +int disk_module_version(char *buff, int size) +{ + FILE *fd; + int n_read; + + if (size <= 0 || size > MAX_STR_LEN) { + return FAILURE; + } + + fd = fopen("/sys/module/cas_disk/version", "r"); + if (!fd) { + return FAILURE; + } + + n_read = fread(buff, 1, size, fd); + if (ferror(fd)) { + n_read = 0; + } + fclose(fd); + + if (n_read > 0) { + buff[n_read - 
1] = '\0'; + return SUCCESS; + } else { + return FAILURE; + } +} + +float calculate_flush_progress(unsigned dirty, unsigned flushed) +{ + unsigned total_dirty; + + if (!flushed) + return 0; + + total_dirty = dirty + flushed; + return total_dirty ? 100. * flushed / total_dirty : 100; +} + +int get_flush_progress(int unsigned cache_id, float *progress) +{ + struct kcas_cache_info cmd_info; + int fd = 0; + + fd = open_ctrl_device(); + if (fd == -1) + return FAILURE; + + memset(&cmd_info, 0, sizeof(cmd_info)); + + cmd_info.cache_id = cache_id; + if (ioctl(fd, KCAS_IOCTL_CACHE_INFO, &cmd_info) < 0) { + close(fd); + return FAILURE; + } + + *progress = calculate_flush_progress(cmd_info.info.dirty, + cmd_info.info.flushed); + + close(fd); + return SUCCESS; +} + +struct list_printout_ctx +{ + FILE *intermediate; + FILE *out; + int type; + int result; +}; + +void *list_printout(void *ctx) +{ + struct list_printout_ctx *spc = ctx; + if (stat_format_output(spc->intermediate, + spc->out, spc->type)) { + cas_printf(LOG_ERR, "An error occured during statistics formatting.\n"); + spc->result = FAILURE; + } else { + spc->result = SUCCESS; + } + + return NULL; +} + +int get_core_pool_count(int fd) +{ + struct kcas_core_pool_count cmd; + + if (ioctl(fd, KCAS_IOCTL_GET_CORE_POOL_COUNT, &cmd) < 0) + return 0; + + return cmd.core_pool_count; +} + +int get_core_pool_devices(struct kcas_core_pool_path *cmd) +{ + int fd, status, result = SUCCESS; + + fd = open_ctrl_device(); + if (fd == -1) + return FAILURE; + + cmd->core_pool_count = get_core_pool_count(fd); + if (cmd->core_pool_count <= 0) { + goto error_out; + } + + cmd->core_path_tab = malloc(cmd->core_pool_count * MAX_STR_LEN); + if (NULL == cmd->core_path_tab) { + cmd->core_pool_count = 0; + goto error_out; + } + + if ((status = ioctl(fd, KCAS_IOCTL_GET_CORE_POOL_PATHS, cmd)) < 0) { + cas_printf(LOG_ERR, "Error while retrieving core pool list %d %d\n", + errno, status); + free(cmd->core_path_tab); + result = FAILURE; + goto 
error_out; + } + +error_out: + close(fd); + return result; +} + +int list_caches(unsigned int list_format) +{ + struct cache_device **caches, *curr_cache; + struct kcas_core_pool_path core_pool_path_cmd = {0}; + struct core_device *curr_core; + int caches_count, i, j; + /* 1 is writing end, 0 is reading end of a pipe */ + FILE *intermediate_file[2]; + int result = SUCCESS; + pthread_t thread; + struct list_printout_ctx printout_ctx; + + caches = get_cache_devices(&caches_count); + if (caches_count < 0) { + cas_printf(LOG_INFO, "Error getting caches list\n"); + return FAILURE; + } + + if (get_core_pool_devices(&core_pool_path_cmd)) { + free_cache_devices_list(caches, caches_count); + cas_printf(LOG_INFO, "Error getting cores in pool list\n"); + return FAILURE; + } + + if (caches == NULL && !core_pool_path_cmd.core_pool_count) { + cas_printf(LOG_INFO, "No caches running\n"); + return SUCCESS; + } + + if (create_pipe_pair(intermediate_file)) { + cas_printf(LOG_ERR,"Failed to create unidirectional pipe.\n"); + free(core_pool_path_cmd.core_path_tab); + free_cache_devices_list(caches, caches_count); + return FAILURE; + } + + printout_ctx.intermediate = intermediate_file[0]; + printout_ctx.out = stdout; + printout_ctx.type = (OUTPUT_FORMAT_CSV == list_format ? 
RAW_CSV : TEXT); + + if (pthread_create(&thread, 0, list_printout, &printout_ctx)) { + cas_printf(LOG_ERR,"Failed to create thread.\n"); + free(core_pool_path_cmd.core_path_tab); + free_cache_devices_list(caches, caches_count); + fclose(intermediate_file[0]); + fclose(intermediate_file[1]); + return FAILURE; + } + + if (caches_count || core_pool_path_cmd.core_pool_count) { + fprintf(intermediate_file[1], + TAG(TREE_HEADER)"%s,%s,%s,%s,%s,%s\n", + "type", "id", "disk", "status", + "write policy", "device"); + } + + if (core_pool_path_cmd.core_pool_count) { + fprintf(intermediate_file[1], TAG(TREE_BRANCH) + "%s,%s,%s,%s,%s,%s\n", + "core pool", /* type */ + "-", /* id */ + "-", + "-", + "-", /* write policy */ + "-" /* device */); + for (i = 0; i < core_pool_path_cmd.core_pool_count; i++) { + char *core_path = core_pool_path_cmd.core_path_tab + (MAX_STR_LEN * i); + if (get_dev_path(core_path, core_path, MAX_STR_LEN)) { + cas_printf(LOG_WARNING, "WARNING: Can not resolve path to core. " + "By-id path will be shown for that core.\n"); + } + fprintf(intermediate_file[1], TAG(TREE_LEAF) + "%s,%s,%s,%s,%s,%s\n", + "core", /* type */ + "-", /* id */ + core_path, + "Detached", + "-", /* write policy */ + "-" /* device */); + } + } + + for (i = 0; i < caches_count; ++i) { + curr_cache = caches[i]; + + char status_buf[CACHE_STATE_LENGHT]; + const char *tmp_status; + char mode_string[10]; + float cache_flush_prog; + float core_flush_prog; + + get_dev_path(curr_cache->device, curr_cache->device, sizeof(curr_cache->device)); + + cache_flush_prog = calculate_flush_progress(curr_cache->dirty, curr_cache->flushed); + if (cache_flush_prog) { + snprintf(status_buf, sizeof(status_buf), + "%s (%3.1f %%)", "Flushing", cache_flush_prog); + tmp_status = status_buf; + snprintf(mode_string, sizeof(mode_string), "wb->%s", + cache_mode_to_name(curr_cache->mode)); + } else { + tmp_status = get_cache_state_name(curr_cache->state); + snprintf(mode_string, sizeof(mode_string), "%s", + 
cache_mode_to_name(curr_cache->mode)); + } + + fprintf(intermediate_file[1], TAG(TREE_BRANCH) + "%s,%u,%s,%s,%s,%s\n", + "cache", /* type */ + curr_cache->id, /* id */ + curr_cache->device, /* device path */ + tmp_status, /* cache status */ + mode_string, /* write policy */ + "-" /* device */); + + for (j = 0; j < curr_cache->core_count; ++j) { + char* core_path; + + curr_core = &curr_cache->cores[j]; + core_path = curr_core->path; + + core_flush_prog = calculate_flush_progress(curr_core->info.stats.dirty, + curr_core->info.stats.flushed); + + if (!core_flush_prog && cache_flush_prog) { + core_flush_prog = curr_core->info.stats.dirty ? 0 : 100; + } + + if (core_flush_prog || cache_flush_prog) { + snprintf(status_buf, CACHE_STATE_LENGHT, + "%s (%3.1f %%)", "Flushing", core_flush_prog); + tmp_status = status_buf; + } else { + tmp_status = get_core_state_name(curr_core->info.state); + } + + fprintf(intermediate_file[1], TAG(TREE_LEAF) + "%s,%u,%s,%s,%s,/dev/cas%d-%d\n", + "core", /* type */ + curr_core->id, /* id */ + core_path, /* path to core*/ + tmp_status, /* core status */ + "-", /* write policy */ + curr_cache->id, /* core id (part of path)*/ + curr_core->id /* cache id (part of path)*/ ); + } + } + + free_cache_devices_list(caches, caches_count); + free(core_pool_path_cmd.core_path_tab); + + fclose(intermediate_file[1]); + pthread_join(thread, 0); + if (printout_ctx.result) { + result = 1; + cas_printf(LOG_ERR, "An error occured during list formatting.\n"); + + } + fclose(intermediate_file[0]); + return result; +} + +int _get_cas_capabilites(struct kcas_capabilites *caps, int quiet) +{ + static bool retrieved = false; + static struct kcas_capabilites caps_buf; + int status = SUCCESS; + int ctrl_fd; + if (!retrieved) { + if (quiet) { + ctrl_fd = open_ctrl_device_quiet(); + } else { + ctrl_fd = open_ctrl_device(); + } + + if (ctrl_fd < 0) { + if (!quiet) + print_err(KCAS_ERR_SYSTEM); + + return FAILURE; + } + + status = ioctl(ctrl_fd, KCAS_IOCTL_GET_CAPABILITIES, 
&caps_buf); + close(ctrl_fd); + + if (status) { + return FAILURE; + } + retrieved = true; + } + + memcpy_s(caps, sizeof(*caps), &caps_buf, sizeof(caps_buf)); + return status; +} + +int get_cas_capabilites_quiet(struct kcas_capabilites *caps) +{ + return _get_cas_capabilites(caps, true); +} + +int get_cas_capabilites(struct kcas_capabilites *caps) +{ + return _get_cas_capabilites(caps, false); +} + +int nvme_format(const char *device_path, int metadata_mode, int force) +{ + struct kcas_nvme_format cmd_info; + int fd; + int result = 0; + + strncpy_s(cmd_info.device_path_name, + sizeof(cmd_info.device_path_name), device_path, + strnlen_s(device_path, sizeof(cmd_info.device_path_name))); + + switch (metadata_mode) { + case METADATA_MODE_NORMAL: + cmd_info.metadata_mode = CAS_METADATA_MODE_NORMAL; + break; + case METADATA_MODE_ATOMIC: + cmd_info.metadata_mode = CAS_METADATA_MODE_ATOMIC; + break; + default: + return FAILURE; + } + cmd_info.force = force; + + fd = open_ctrl_device(); + if (fd == -1) + return FAILURE; + + /* Format NVMe */ + result = run_ioctl(fd, KCAS_IOCTL_NVME_FORMAT, &cmd_info); + close(fd); + + if (result) { + result = cmd_info.ext_err_code ? 
: KCAS_ERR_SYSTEM; + cas_printf(LOG_INFO, "Changing NVMe format failed!\n"); + print_err(result); + return FAILURE; + } + + cas_printf(LOG_INFO, "Changing NVMe format succeeded.\n" + "IMPORTANT: Reboot is required!\n"); + + return SUCCESS; +} + +int _check_cache_device(const char *device_path, + struct kcas_cache_check_device *cmd_info) +{ + int result, fd; + + strncpy_s(cmd_info->path_name, sizeof(cmd_info->path_name), device_path, + strnlen_s(device_path, sizeof(cmd_info->path_name))); + + fd = open_ctrl_device(); + if (fd == -1) + return FAILURE; + + result = run_ioctl(fd, KCAS_IOCTL_CACHE_CHECK_DEVICE, cmd_info); + + close(fd); + + return result; +} + +int check_cache_device(const char *device_path) +{ + struct kcas_cache_check_device cmd_info; + FILE *intermediate_file[2]; + int result; + + result = _check_cache_device(device_path, &cmd_info); + + if (result) { + result = cmd_info.ext_err_code ? : KCAS_ERR_SYSTEM; + print_err(result); + return FAILURE; + } + + if (create_pipe_pair(intermediate_file)) { + cas_printf(LOG_ERR,"Failed to create unidirectional pipe.\n"); + return FAILURE; + } + + fprintf(intermediate_file[1], TAG(TABLE_HEADER) "Is cache,Clean Shutdown,Cache dirty\n"); + + fprintf(intermediate_file[1], TAG(TABLE_ROW)); + if (cmd_info.is_cache_device) { + fprintf(intermediate_file[1], "yes,%s,%s\n", + cmd_info.clean_shutdown ? "yes" : "no", + cmd_info.cache_dirty ? 
"yes" : "no"); + } else { + fprintf(intermediate_file[1], "no,-,-\n"); + } + + fclose(intermediate_file[1]); + stat_format_output(intermediate_file[0], stdout, RAW_CSV); + fclose(intermediate_file[0]); + + return SUCCESS; +} diff --git a/casadm/cas_lib.h b/casadm/cas_lib.h new file mode 100644 index 000000000..59e17cf58 --- /dev/null +++ b/casadm/cas_lib.h @@ -0,0 +1,297 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __CAS_LIB_H__ +#define __CAS_LIB_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "safeclib/safe_str_lib.h" +#include +#include + +#define CTRL_DEV_PATH "/dev/cas_ctrl" + +#define ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0])) + +#define FAILURE 1 /**< default non-zero exit code. */ +#define INTERRUPTED 2 /**< if command is interrupted */ +#define SUCCESS 0 /**< 0 exit code from majority of our functions + stands for success */ + +struct core_device { + int id; + int cache_id; + char path[MAX_STR_LEN]; + struct kcas_core_info info; +}; + +struct cache_device { + int id; + int state; + int expected_core_count; + char device[MAX_STR_LEN]; + int mode; + int eviction_policy; + int cleaning_policy; + int dirty; + int flushed; + unsigned size; + int core_count; + struct core_device cores[]; +}; + +struct cas_param { + char *name; + char *unit; + char **value_names; + uint32_t (*transform_value)(uint32_t value); + uint32_t value; + bool select; +}; + +enum output_format_t { + OUTPUT_FORMAT_INVALID = 0, + OUTPUT_FORMAT_TABLE = 1, + OUTPUT_FORMAT_CSV = 2, + OUTPUT_FORMAT_DEFAULT = OUTPUT_FORMAT_TABLE +}; + +enum metadata_mode_t { + METADATA_MODE_INVALID = 0, + METADATA_MODE_NORMAL, + METADATA_MODE_ATOMIC, + METADATA_MODE_DEFAULT = METADATA_MODE_NORMAL, +}; + +#define STATS_FILTER_INVALID 0 +#define STATS_FILTER_CONF (1 << 0) +#define STATS_FILTER_USAGE (1 << 1) +#define STATS_FILTER_REQ (1 << 2) +#define STATS_FILTER_BLK 
(1 << 3) +#define STATS_FILTER_ERR (1 << 4) +#define STATS_FILTER_IOCLASS (1 << 5) +#define STATS_FILTER_ALL (STATS_FILTER_CONF | \ + STATS_FILTER_USAGE | \ + STATS_FILTER_REQ | \ + STATS_FILTER_BLK | \ + STATS_FILTER_ERR) +#define STATS_FILTER_DEFAULT STATS_FILTER_ALL + +#define STATS_FILTER_COUNTERS (STATS_FILTER_REQ | STATS_FILTER_BLK | STATS_FILTER_ERR) + +const char *eviction_policy_to_name(uint8_t policy); +const char *cleaning_policy_to_name(uint8_t policy); +const char *cache_mode_to_name(uint8_t cache_mode); +const char *get_cache_state_name(int cache_state); +const char *get_core_state_name(int core_state); +const char *metadata_variant_to_name(uint8_t variant); +const char *metadata_mode_to_name(uint8_t metadata_mode); +const char *seq_cutoff_policy_to_name(uint8_t seq_cutoff_policy); + +__attribute__((format(printf, 2, 3))) +typedef int (*cas_printf_t)(int log_level, const char *format, ...); + +extern cas_printf_t cas_printf; + +__attribute__((format(printf, 2, 3))) +int caslog(int log_level, const char *template, ...); + +#define CAS_CLI_HELP_METADATA_VARIANTS \ + CAS_METADATA_VARIANT_MAX"|" \ + CAS_METADATA_VARIANT_MIX"|" \ + CAS_METADATA_VARIANT_MIN + +/* for CLI commands arguments */ +#define YES 1 +#define NO 0 +#define UNDEFINED -1 +void metadata_memory_footprint(uint64_t size, float *footprint, const char **units); + +int start_cache(ocf_cache_id_t cache_id, unsigned int cache_init, + const char *cache_device, ocf_cache_mode_t cache_mode, + ocf_eviction_t eviction_policy_type, + ocf_cache_line_size_t line_size, int force); +int stop_cache(ocf_cache_id_t cache_id, int flush); + +#ifdef WI_AVAILABLE +#define CAS_CLI_HELP_START_CACHE_MODES "wt|wb|wa|pt|wi" +#define CAS_CLI_HELP_SET_CACHE_MODES "wt|wb|wa|pt|wi" +#define CAS_CLI_HELP_SET_CACHE_MODES_FULL "Write-Through, Write-Back, Write-Around, Pass-Through, Write-Invalidate" +#define CAS_CLI_HELP_START_CACHE_MODES_FULL "Write-Through, Write-Back, Write-Around, Pass-Through, Write-Invalidate" +#else 
+#define CAS_CLI_HELP_START_CACHE_MODES "wt|wb|wa|pt" +#define CAS_CLI_HELP_SET_CACHE_MODES "wt|wb|wa|pt" +#define CAS_CLI_HELP_START_CACHE_MODES_FULL "Write-Through, Write-Back, Write-Around, Pass-Through" +#define CAS_CLI_HELP_SET_CACHE_MODES_FULL "Write-Through, Write-Back, Write-Around, Pass-Through" +#endif + +/** + * @brief handle set cache param command + * @param cache_id id of cache device + * @param params parameter array + * @return exit code of successful completion is 0; + * nonzero exit code means failure + */ +int cache_params_set(unsigned int cache_id, struct cas_param *params); + +/** + * @brief get cache param value + * @param cache_id id of cache device + * @param param_id id of cache parameter to retrive + * @param param variable to pass value to caller + * @return exit code of successful completion is 0; + * nonzero exit code means failure + */ +int cache_get_param(unsigned int cache_id, unsigned int param_id, + struct cas_param *param); +/** + * @brief handle get cache param command + * @param cache_id id of cache device + * @param params parameter array + * @return exit code of successful completion is 0; + * nonzero exit code means failure + */ +int cache_params_get(unsigned int cache_id, struct cas_param *params, + unsigned int output_format); + +/** + * @brief handle set core param command + * @param cache_id id of cache device + * @param core_id id of core device + * @param params parameter array + * @return exit code of successful completion is 0; + * nonzero exit code means failure + */ +int core_params_set(unsigned int cache_id, unsigned int core_id, + struct cas_param *params); + +/** + * @brief handle get core param command + * @param cache_id id of cache device + * @param core_id id of core device + * @param params parameter array + * @return exit code of successful completion is 0; + * nonzero exit code means failure + */ +int core_params_get(unsigned int cache_id, unsigned int core_id, + struct cas_param *params, unsigned int 
output_format); + +/** + * @brief handle set cache mode (-Q) command + * @param in cache mode identifier of cache mode (WRITE_BACK, WRITE_THROUGH etc...) + * @param cache_id id of cache device + * @param flush whenever we should flush cache during execution of command. Options: YES, NO, UNDEFINED. + * (UNDEFINED is illegal when transitioning from Write-Back mode to any other mode) + */ +int set_cache_mode(unsigned int cache_state, unsigned int cache_id, int flush); + +/** + * @brief add core device to a cache + * + * @param cache_id cache to which new core is being added + * @param core_device path to a core device that is being added + * @param iogroup_id id of iogroup (this parameter is not exposed in user CLI) + * @param try_add try add core to earlier loaded cache or add to core pool + * @param update_path try update path to core device + * @return 0 upon successful core addition, 1 upon failure + */ +int add_core(unsigned int cache_id, unsigned int core_id, const char *core_device, int try_add, int update_path); + +int get_core_info(int fd, int cache_id, int core_id, struct kcas_core_info *info); + +int remove_core(unsigned int cache_id, unsigned int core_id, + bool detach, bool force_no_flush); + +int core_pool_remove(const char *core_device); +int get_core_pool_count(int fd); + +int reset_counters(unsigned int cache_id, unsigned int core_id); + +int flush_cache(unsigned int cache_id); +int flush_core(unsigned int cache_id, unsigned int core_id); + +int get_cas_capabilites_quiet(struct kcas_capabilites *caps); +int get_cas_capabilites(struct kcas_capabilites *caps); + +int nvme_format(const char *device_path, int metadata_mode, int force); + +int check_cache_device(const char *device_path); + +int partition_list(unsigned int cache_id, unsigned int output_format); +int partition_setup(unsigned int cache_id, const char *file); +int partition_is_name_valid(const char *name); + +int cas_module_version(char *buff, int size); +int disk_module_version(char *buff, 
int size); +int list_caches(unsigned int list_format); +int cache_status(unsigned int cache_id, unsigned int core_id, int io_class_id, + unsigned int stats_filters, unsigned int stats_format); +int get_inactive_core_count(const struct kcas_cache_info *cache_info); + +int open_ctrl_device_quiet(); +int open_ctrl_device(); +int *get_cache_ids(int *cache_count); +struct cache_device *get_cache_device_by_id_fd(int cache_id, int fd); +struct cache_device **get_cache_devices(int *caches_count); +void free_cache_devices_list(struct cache_device **caches, int caches_count); + +int validate_dev(const char *dev_path); +int validate_str_num(const char *source_str, const char *msg, long long int min, long long int max); +int validate_str_num_sbd(const char *source_str, const char *msg, int min, int max); +int validate_str_unum(const char *source_str, const char *msg, unsigned int min, + unsigned int max); +int validate_path(const char *path, int exist); + +int validate_str_cache_mode(const char *s); +int validate_str_ev_policy(const char *s); +int validate_str_cln_policy(const char *s); +int validate_str_meta_variant(const char *s); +int validate_str_stats_filters(const char* s); +int validate_str_output_format(const char* s); +int validate_str_metadata_mode(const char* s); + +/** + * @brief calculate flush progress + * + * @param[in] dirty number of dirty blocks + * @param[in] flush number of flushed blocks + * @return flush progress or 0 if no flush is ongoing + */ +float calculate_flush_progress(unsigned dirty, unsigned flushed); + +/** + * @brief calculate flush progress of given cache + * + * @param[in] cache_id cache to which calculation applies + * @param[out] progress flush progress + * @return 0 on success, nonzero on failure + */ +int get_flush_progress(int unsigned cache_id, float *progress); + +/** + * @brief print error message corresponding with CAS extended error code. 
+ */ +void print_err(int error_code); + +/** + * @brief get special device file path (/dev/sdX) for disk. + */ +int get_dev_path(const char* disk, char* buf, size_t num); + +/** + * @brief convert string to int + */ +bool str_to_int(const char* start, char** end, int *val); + +#endif diff --git a/casadm/cas_lib_utils.c b/casadm/cas_lib_utils.c new file mode 100644 index 000000000..3a080b8ff --- /dev/null +++ b/casadm/cas_lib_utils.c @@ -0,0 +1,535 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include "cas_lib.h" +#include "extended_err_msg.h" +#include "cas_lib_utils.h" +#include "safeclib/safe_str_lib.h" +#include +#include +#include +#include +#include +extern cas_printf_t cas_printf; + +#define IOCTL_RETRIES 3 /* this is how many times ioctl is called */ + +#define VT100_CLEARLINE "[K" +#define ESCAPE 0x1b +#define CARRIAGE_RETURN 0xd +#define INVALID_DIRTY_NO ((uint64_t) (-1)) + +/* file descriptors for pipe */ +/* 1 is writing end, 0 is reading end of a pipe */ +int fdspipe[2]; +/* these must be global for handling signals */ +volatile static int interrupted = 0; /*!< signal was caught, interrupt the + *!< management operation now! */ +static int finished = 0; /*!< if management operation has finished + *!< (so that progressbar drawing thread + *!< can either display "100%" or exit quietly */ +static int device_id = 0; /*!< id of caching device to which management + *!< operation that is underway, applies */ + + +/** + * Default signal handling function exists so that SIGINT wan't interrupt management + * operations in unpredicted/disallowed way. 
+ */ +void sig_handler_default(int x) +{ + static int inter_counter = 0; + inter_counter++; + if (inter_counter>4) { + cas_printf(LOG_ERR, + "Can't interrupt CAS management process\n"); + } +} + +/** + * If management operation was interrupted due to user action (SIGINT) + */ +int was_ioctl_interrupted() +{ + return interrupted; +} + +void sig_handler_interrupt_flushing(int x) +{ + struct kcas_interrupt_flushing cmd_info; + int fd = open(CTRL_DEV_PATH, 0); + close(fdspipe[1]); + interrupted = 1; + + if (fd < 0) { + cas_printf(LOG_ERR, "Device " CTRL_DEV_PATH " not found\n"); + return; + } + + memset(&cmd_info, 0, sizeof(cmd_info)); + cmd_info.cache_id = device_id; + + int res = + run_ioctl(fd, KCAS_IOCTL_INTERRUPT_FLUSHING, &cmd_info); + + close(fd); + if (!res) { + set_default_sig_handler(); + } +} + +/** + * print current backtrace + */ +void dump_stack() +{ + const int sym_max = 512; + void *sym_buf[sym_max]; + int nsym; + nsym = backtrace(sym_buf, sym_max); + backtrace_symbols_fd(sym_buf, nsym, 2); +} + +/** + * Sad CAS :( + * dump stack to allow debugging. 
+ */ +void segv_handler_default(int i) +{ + cas_printf(LOG_ERR, "Segmentation fault\n"); + dump_stack(); + exit(EXIT_FAILURE); +} + +/** + * register default signal handling function + */ +void set_default_sig_handler() +{ + signal(SIGINT, sig_handler_default); + signal(SIGSEGV, segv_handler_default); +} + +/** + * handle errors of cafe c library (wrong parameters passed) + */ +static void safe_lib_constraint_handler(const char *msg, void *ptr, errno_t error) +{ + cas_printf(LOG_ERR, "Safe C lib error\n"); + if (msg) { + cas_printf(LOG_ERR, "%s (%d)\n", msg, error); + } + dump_stack(); + exit(EXIT_FAILURE); +} + +/** + * Register constraint handler for safe_string_library + */ +void set_safe_lib_constraint_handler() +{ + set_mem_constraint_handler_s(safe_lib_constraint_handler); + set_str_constraint_handler_s(safe_lib_constraint_handler); +} + +int _open_ctrl_device(int quiet) +{ + int fd; + fd = open(CTRL_DEV_PATH, 0); + + if (fd < 0) { + if (!quiet) { + cas_printf(LOG_ERR, "Device " CTRL_DEV_PATH + " not found\n"); + cas_printf(LOG_INFO, "Is the kernel module loaded?\n"); + } + return -1; + } + + return fd; +} + +int open_ctrl_device_quiet() +{ + return _open_ctrl_device(true); +} + +/** + * calls open on control device; returns either error (-1) or a valid file descriptor + */ +int open_ctrl_device() +{ + return _open_ctrl_device(false); +} + +/** + * @brief print spinning wheel + */ +void print_progress_indicator(float prog, struct progress_status *ps) +{ + static const char prog_indicator[] = { '|', '/', '-', '\\', '|', '/', '-', '\\'}; + /*!< set of characters implementing "spinning wheel" progress indicator */ + static int max_i = ARRAY_SIZE(prog_indicator); + static int i = 0; + + printf("%c%s... [%c]%c" VT100_CLEARLINE, + CARRIAGE_RETURN, ps->friendly_name, prog_indicator[i], ESCAPE); + if (50 < prog) { + /* we're almost there. 
Ignore all signals at this stage */ + set_default_sig_handler(); + } + + i = (i + 1) % max_i; + fflush(stdout); +} + +/** + * @brief print progress bar once + * @param prog degree of progress (0-100) + * @param ps structure holding status between progressbar and caller + */ +void print_progress_bar(float prog, struct progress_status *ps) +{ + /* constants affecting look of progressbar/activity indicator */ + static const char progress_full = '='; /*!< represents progress_step of progress */ + static const char progress_partial = '-';/*!< represents progress of more than 0 + *!< but less than progress_step */ + static const char progress_empty = ' '; /*!< progressbar didn't reach here */ + static const char delimiter_left = '['; /*!< left delimiter of progress bar */ + static const char delimiter_right = ']';/*!< right delimiter of progress bar */ + static const int progress_step = 2; /*!< progress step - percentage of progress to + *!< be represented by one character. i.e. if + *!< progress stepis set to 2, entire + *!< progressbar is 50 chars wide+2 chars for + *!< delimiters */ + + int i, remaining_m; + time_t elapsed, remaining_s; + + printf("%c%s... 
", CARRIAGE_RETURN, ps->friendly_name); + /* carriage return and "name of op"*/ + putchar(delimiter_left); + + /* make sure, progressbar always moves forward and never backward */ + if (prog < ps->progress_accumulated) { + prog = ps->progress_accumulated; + } else { + ps->progress_accumulated = prog; + } + + /* print actual progress bar */ + for (i = progress_step; i <= prog; i += progress_step){ + putchar(progress_full); + } + + if (((int)prog) % progress_step) { + putchar(progress_partial); + i += progress_step; + } + + for (; i <= 100; i += progress_step){ + putchar(progress_empty); + } + + elapsed = time(NULL) - ps->time_started; + + remaining_s = ((100 - prog) * elapsed) / (prog ?: 1); + remaining_m = remaining_s / 60; + remaining_s -= remaining_m * 60; + + if (remaining_m) { + /* ESCAPE VT100_CLEARLINE is terminal control sequence to clear "rest + * of the line */ + printf("%c %3.1f%% [%dm%02lds remaining]%c" VT100_CLEARLINE, + delimiter_right, prog, remaining_m, remaining_s, ESCAPE); + } else { + printf("%c %3.1f%% [%lds remaining]%c" VT100_CLEARLINE, + delimiter_right, prog, remaining_s, ESCAPE); + } + + fflush(stdout); +} + +/** + * @brief either print a progressbar or spinning wheel depending on prog + */ +void print_progress_bar_or_indicator(float prog, struct progress_status *ps) +{ + if (0.01 > prog || 99.99 < prog) { + print_progress_indicator(prog, ps); + } else { + print_progress_bar(prog, ps); + } +} + +/** + * initialize progressbar structure; + */ +void init_progress_bar(struct progress_status *ps) +{ + if (NULL != ps) { + memset(ps, 0, sizeof(*ps)); + ps->dirty_clines_curr = INVALID_DIRTY_NO; + ps->dirty_clines_initial = INVALID_DIRTY_NO; + ps->time_started = time(NULL); + } +} + +void get_core_flush_progress(int fd, int cache_id, int core_id, float *prog) +{ + struct kcas_core_info cmd_info; + + memset(&cmd_info, 0, sizeof(cmd_info)); + cmd_info.cache_id = cache_id; + cmd_info.core_id = core_id; + + if (0 == ioctl(fd, KCAS_IOCTL_CORE_INFO, 
&cmd_info)) { + *prog = calculate_flush_progress(cmd_info.stats.dirty, + cmd_info.stats.flushed); + } +} + +void get_cache_flush_progress(int fd, int cache_id, float *prog) +{ + struct kcas_cache_info cmd_info; + + memset(&cmd_info, 0, sizeof(cmd_info)); + cmd_info.cache_id = cache_id; + + if (0 == ioctl(fd, KCAS_IOCTL_CACHE_INFO, &cmd_info)) { + *prog = calculate_flush_progress(cmd_info.info.dirty, + cmd_info.info.flushed); + } +} + +/** + * pthread thread handling function - runs during proper ioctl execution. Prints command progress + */ +void *print_command_progress(void *th_arg) +{ + static const int + show_progressbar_after = 2; /*!< threshold in seconds */ + + int do_print_progress_bar = 0; + int mseconds = 0; /*< milliseconds */ + int fd; + float prog = 0.; + struct progress_status *ps = th_arg; + /* argument of command progress of which is monitored */ + /*1,2,0 are descriptors of stdout, err and in respectively*/ + int running_tty = isatty(1) && isatty(2) && isatty(0); + struct sigaction new_action, old_action; + + fd = open(CTRL_DEV_PATH, 0); + if (fd < 0) { + cas_printf(LOG_ERR, "Device " CTRL_DEV_PATH " not found\n"); + return NULL; /* FAILURE; */ + } + + device_id = ps->cache_id; + + sigaction(SIGINT, NULL, &old_action); + if (old_action.sa_handler != SIG_IGN) { + new_action.sa_handler = sig_handler_interrupt_flushing; + sigemptyset(&new_action.sa_mask); + new_action.sa_flags = 0; + sigaction(SIGINT, &new_action, NULL); + } + + sched_yield(); + + while (1) { + struct pollfd pfd; + struct timespec ts; + sigset_t sigmask; + int ppoll_res; + sigemptyset(&sigmask); + ts.tv_sec = 1; + ts.tv_nsec = 0; + pfd.fd = fdspipe[0]; + pfd.events = POLLIN | POLLRDHUP; + ppoll_res = ppoll(&pfd, 1, &ts, &sigmask); + if (ppoll_res < 0) { + if (ENOMEM == errno) { + sleep(1); + /* ppoll call failed due to insufficient memory */ + } else if (EINTR == errno) { + interrupted = 1; + } else { /* other error conditions are EFAULT or EINVAL + * cannot happen in realistic 
conditions, + * and are likely to refer to OS errors, which + * cannot possibly be handled. Perform abortion. + */ + cas_printf(LOG_ERR, "Failed ppoll"); + abort(); + } + } + mseconds += 1000; + + if (interrupted) { + /* if flushing is interrupted by signal, don't proceed with displaying + * any kind of progress bar. if bar was previously printed, + * print indicator instead */ + if (do_print_progress_bar) { + print_progress_indicator(100, ps); + } + break; + } else if (finished) { + if (do_print_progress_bar) { + print_progress_bar_or_indicator(100., ps); + } + break; + } + + if (ps->core_id == OCF_CORE_ID_INVALID) { + get_cache_flush_progress(fd, ps->cache_id, &prog); + } else { + get_core_flush_progress(fd, ps->cache_id, ps->core_id, &prog); + } + + /* it is normal that ioctl to get statistics + * fails from time to time. Most common cases + * of it are: + * - during --start-cache when cache isn't added + * - during --stopping-cache, when progress is + * supposed to read "100%", but cache is actually + * already removed and its stopping progress can't + * be queried at all. + */ + if (mseconds >= show_progressbar_after * 1000 + && running_tty && prog < 50) { + do_print_progress_bar = 1; + } + + if (do_print_progress_bar) { + print_progress_bar_or_indicator(prog, ps); + } + } + close(fdspipe[0]); + + close(fd); + + /* if progressbar was displayed at least one, clear line */ + if (do_print_progress_bar) { + printf("%c%c" VT100_CLEARLINE, CARRIAGE_RETURN, ESCAPE); + } + fflush(stdout); + return NULL; +} + +/* + * Run ioctl in a way that displays progressbar (if flushing operation takes longer) + * Catch SIGINT signal. 
+ * @param friendly_name name of management operation that shall + * be displayed in command prompt + */ +int run_ioctl_interruptible(int fd, int command, void *cmd, + char *friendly_name, int cache_id, int core_id) +{ + pthread_t thread; + int ioctl_res; + struct progress_status ps; + sigset_t sigset; + + init_progress_bar(&ps); + ps.friendly_name = friendly_name; + ps.cache_id = cache_id; + ps.core_id = core_id; + if (pipe(fdspipe)) { + cas_printf(LOG_ERR,"Failed to allocate pipes.\n"); + return -1; + } + interrupted = 0; + + sigemptyset(&sigset); + sigaddset(&sigset, SIGINT); + pthread_sigmask(SIG_BLOCK, &sigset, NULL); + + pthread_create(&thread, 0, print_command_progress, &ps); + ioctl_res = run_ioctl(fd, command, cmd); + if (!interrupted) { + close(fdspipe[1]); + } + finished = 1; + + pthread_join(thread, 0); + + return ioctl_res; +} + +/* + * @brief ioctl wrapper that retries ioctl attempts within one second timeouts + * @param[in] fd as for IOCTL(2) + * @param[in] command as for IOCTL(2) + * @param[inout] cmd_info as for IOCTL(2) + */ +int run_ioctl(int fd, int command, void *cmd) +{ + int i, ret; + struct timespec timeout = { + .tv_sec = 1, + .tv_nsec = 0, + }; + + for (i = 0; i < IOCTL_RETRIES; i++) { + ret = ioctl(fd, command, cmd); + + if (ret < 0) { + if (interrupted) { + return -EINTR; + } if (EINTR == errno) { + return -EINTR; + } else if (EBUSY == errno) { + int nret = nanosleep(&timeout, NULL); + if (nret) { + return -EINTR; + } + } else { + return ret; + } + } else { + break; + } + } + + return ret; +} + + +int create_pipe_pair(FILE **intermediate_file) +{ + /* 1 is writing end, 0 is reading end of a pipe */ + int pipefd[2]; + + if (pipe(pipefd)) { + cas_printf(LOG_ERR,"Failed to create unidirectional pipe.\n"); + return FAILURE; + } + + intermediate_file[0] = fdopen(pipefd[0], "r"); + if (!intermediate_file[0]) { + cas_printf(LOG_ERR,"Failed to open reading end of an unidirectional pipe.\n"); + close(pipefd[0]); + close(pipefd[1]); + return 
FAILURE; + } + intermediate_file[1] = fdopen(pipefd[1], "w"); + if (!intermediate_file[1]) { + cas_printf(LOG_ERR,"Failed to open reading end of an unidirectional pipe.\n"); + fclose(intermediate_file[0]); + close(pipefd[1]); + return FAILURE; + } + return SUCCESS; +} diff --git a/casadm/cas_lib_utils.h b/casadm/cas_lib_utils.h new file mode 100644 index 000000000..c4ef98b19 --- /dev/null +++ b/casadm/cas_lib_utils.h @@ -0,0 +1,68 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __CAS_LIB_UTILS_H__ +#define __CAS_LIB_UTILS_H__ + +struct progress_status { + uint64_t + dirty_clines_initial; /*!< amount of dirty clines when command is initiated */ + + uint64_t + dirty_clines_curr; /*!< amount of dirty clines at current progress level */ + + int progress_accumulated; /*!< this is to ensure that progressbar is always + *!< from 0 to 100% and progress indicated by it + *!< never actually drops. */ + int time_started; /*!< time when particular long running + *!< operation was started */ + char *friendly_name; /*!< name of management operation that shall + *!< be displayed in command prompt */ + int cache_id; /*!< cache id */ + int core_id; /*!< core id */ +}; + + +void init_progress_bar(struct progress_status *ps); +void print_progress_bar_or_indicator(float prog, struct progress_status *ps); +int run_ioctl(int fd, int command, void *cmd); +int run_ioctl_interruptible(int fd, int command, void *cmd, + char *friendly_name, int cache_id, int core_id); +int open_ctrl_device(); +int was_ioctl_interrupted(); +void set_default_sig_handler(); +void set_safe_lib_constraint_handler(); + + +/** + * function creates pair files representing an unnamed pipe. + * this is highlevel counterpart to pipe syscall. + * + * null is returned upon failure; + * + * FILE *pipes[2] is returned upon success. 
+ * 1 is writing end, 0 is reading end of a pipe + */ +int create_pipe_pair(FILE **); + +/** + * Check if string is empty + * + * @param str - reference to the string + * @retval 1 string is empty + * @retval 0 string is not empty + */ +static inline int strempty(const char *str) +{ + if (NULL == str) { + return 1; + } else if ('\0' == str[0]) { + return 1; + } else { + return 0; + } +} + +#endif diff --git a/casadm/cas_main.c b/casadm/cas_main.c new file mode 100644 index 000000000..36bbf708a --- /dev/null +++ b/casadm/cas_main.c @@ -0,0 +1,2024 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "argp.h" +#include "cas_lib.h" +#include "cas_lib_utils.h" +#include "safeclib/safe_str_lib.h" +#include +#include "upgrade.h" +#include "statistics_view.h" + +#define HELP_HEADER OCF_PREFIX_LONG + +#define WRONG_DEVICE_ERROR "Specified caching device '%s' is not supported.\n" +#define NOT_BLOCK_ERROR "Please use block device file.\n" + +extern cas_printf_t cas_printf; + +#define PARAM_TYPE_CORE 1 +#define PARAM_TYPE_CACHE 2 + +/* struct with all the commands parameters/flags with default values */ +struct command_args{ + int force; + int cache_id; + int core_id; + int state; + int cache_mode; + int stats_filters; + int output_format; + int io_class_id; + int eviction_policy_type; + int line_size; + int cache_state_flush; + int flush_data; + int cleaning_policy_type; + int script_subcmd; + int try_add; + int update_path; + int detach; + int no_flush; + const char* cache_device; + const char* core_device; + uint32_t params_type; + uint32_t params_count; + bool verbose; +}; + +static struct command_args command_args_values = { + .force = 0, + .cache_id = 0, + .core_id = OCF_CORE_ID_INVALID, + .state = CACHE_INIT_NEW, + .cache_mode = ocf_cache_mode_default, + .stats_filters = 
STATS_FILTER_DEFAULT, + .output_format = OUTPUT_FORMAT_DEFAULT, + .io_class_id = -1, + .line_size = ocf_cache_line_size_default, + .cache_state_flush = UNDEFINED, /* three state logic: YES NO UNDEFINED */ + .flush_data = 1, + .cleaning_policy_type = 0, + .script_subcmd = -1, + .try_add = false, + .update_path = false, + .detach = false, + .no_flush = false, + .cache_device = NULL, + .core_device = NULL, + + .params_type = 0, + .params_count = 0, + .verbose = false, +}; + +int validate_device_name(const char *dev_name) { + if (validate_dev(dev_name)) { + cas_printf(LOG_ERR, "Cache creation aborted, %s entry exists in /etc/fstab. Please remove it!\n", + dev_name); + return FAILURE; + } + + if (strnlen(dev_name, MAX_STR_LEN) >= MAX_STR_LEN) { + cas_printf(LOG_ERR, "Illegal device %s\n", dev_name); + return FAILURE; + } + + return SUCCESS; +} + +int command_handle_option(char *opt, const char **arg) +{ + if (!strcmp(opt, "cache-id")) { + if (validate_str_num(arg[0], "cache id", OCF_CACHE_ID_MIN, + OCF_CACHE_ID_MAX) == FAILURE) + return FAILURE; + + command_args_values.cache_id = atoi(arg[0]); + } else if (!strcmp(opt, "core-id")) { + if (validate_str_num(arg[0], "core id", 0, OCF_CORE_ID_MAX) == FAILURE) + return FAILURE; + + command_args_values.core_id = atoi(arg[0]); + } else if (!strcmp(opt, "core-device")) { + if (validate_device_name(arg[0]) == FAILURE) + return FAILURE; + + command_args_values.core_device = arg[0]; + } else if (!strcmp(opt, "cache-device")) { + if (validate_device_name(arg[0]) == FAILURE) + return FAILURE; + + command_args_values.cache_device = arg[0]; + } else if (!strcmp(opt, "no-data-flush")) { + command_args_values.flush_data = 0; + } else if (!strcmp(opt, "output-format")) { + command_args_values.output_format + = validate_str_output_format(arg[0]); + + if (OUTPUT_FORMAT_INVALID == command_args_values.output_format) + return FAILURE; + } else if (!strcmp(opt, "cleaning-policy-type")) { + command_args_values.cleaning_policy_type = 
validate_str_cln_policy((const char*)arg[0]); + + if (command_args_values.cleaning_policy_type < 0) + return FAILURE; + } else if (!strcmp(opt, "eviction-policy")) { + command_args_values.eviction_policy_type = validate_str_ev_policy((const char*)arg[0]); + + if (command_args_values.eviction_policy_type < 0) + return FAILURE; + } else if (!strcmp(opt, "try-add")) { + command_args_values.try_add = true; + } else if (!strcmp(opt, "update-path")) { + command_args_values.update_path = true; + } else if (!strcmp(opt, "detach")) { + command_args_values.detach = true; + } else if (!strcmp(opt, "no-flush")) { + command_args_values.no_flush = true; + } else { + return FAILURE; + } + + return SUCCESS; +} + +int remove_core_command_handle_option(char *opt, const char **arg) +{ + if (!strcmp(opt, "cache-id")){ + if (validate_str_num(arg[0], "cache id", OCF_CACHE_ID_MIN, OCF_CACHE_ID_MAX) == FAILURE) + return FAILURE; + + command_args_values.cache_id = atoi(arg[0]); + } else if (!strcmp(opt, "core-id")){ + if (validate_str_num(arg[0], "core id", 0, OCF_CORE_ID_MAX) == FAILURE) + return FAILURE; + + command_args_values.core_id = atoi(arg[0]); + } else if (!strcmp(opt, "force")){ + command_args_values.force = 1; + } + + return 0; +} + +int core_pool_remove_command_handle_option(char *opt, const char **arg) +{ + if (!strcmp(opt, "device")) { + if (strnlen_s(arg[0], MAX_STR_LEN) >= MAX_STR_LEN) { + cas_printf(LOG_ERR, "Illegal device %s\n", arg[0]); + return FAILURE; + } + + command_args_values.core_device = arg[0]; + } + + return 0; +} + +int start_cache_command_handle_option(char *opt, const char **arg) +{ + if (!strcmp(opt, "force")) { + command_args_values.force = 1; + } else if (!strcmp(opt, "cache-id")) { + if (validate_str_num(arg[0], "cache id", OCF_CACHE_ID_MIN, OCF_CACHE_ID_MAX) == FAILURE) + return FAILURE; + + command_args_values.cache_id = atoi(arg[0]); + } else if (!strcmp(opt, "load")) { + command_args_values.state = CACHE_INIT_LOAD; + } else if (!strcmp(opt, 
"cache-device")) { + if(validate_device_name(arg[0]) == FAILURE) + return FAILURE; + + command_args_values.cache_device = arg[0]; + } else if (!strcmp(opt, "cache-mode")) { + command_args_values.cache_mode = + validate_str_cache_mode((const char*)arg[0]); + + if (command_args_values.cache_mode < 0) + return FAILURE; + } else if (!strcmp(opt, "cache-line-size")) { + if (validate_str_num_sbd(arg[0], "cache line size", ocf_cache_line_size_min / KiB, + ocf_cache_line_size_max / KiB) == FAILURE) + return FAILURE; + + command_args_values.line_size = atoi((const char*)arg[0]) * KiB; + } + + return 0; +} + +#define xstr(s) str(s) +#define str(s) #s + +#define CACHE_ID_DESC "Identifier of cache instance <"xstr(OCF_CACHE_ID_MIN)"-"xstr(OCF_CACHE_ID_MAX)">" +#define CACHE_ID_DESC_LONG CACHE_ID_DESC " (if not provided, the first available number will be used)" + +/* OCF_CORE_ID_MAX is defined by arithmetic operations on OCF_CORE_MAX. As a result there is no easy way + * to stringify OCF_CORE_ID_MAX. To work around this, additional definition for max core id is introduced here. + * In case of mismatch between header and local definition preprocessor error is triggered. */ +#define _CASADM_CORE_ID_MAX 4095 +#if (_CASADM_CORE_ID_MAX != OCF_CORE_ID_MAX) +#error "Max core id definitions discrepancy. Please update above definition." 
+#endif +#define CORE_ID_DESC "Identifier of core <0-"xstr(_CASADM_CORE_ID_MAX)"> within given cache instance" + +#define CACHE_DEVICE_DESC "Caching device to be used" +#define CORE_DEVICE_DESC "Path to core device" + + +static cli_option start_options[] = { + {'d', "cache-device", CACHE_DEVICE_DESC, 1, "DEVICE", CLI_OPTION_REQUIRED}, + {'i', "cache-id", CACHE_ID_DESC_LONG, 1, "ID", 0}, + {'l', "load", "Load cache metadata from caching device (DANGEROUS - see manual or Admin Guide for details)"}, + {'f', "force", "Force the creation of cache instance"}, + {'c', "cache-mode", "Set cache mode from available: {"CAS_CLI_HELP_START_CACHE_MODES"} "CAS_CLI_HELP_START_CACHE_MODES_FULL"; without this parameter Write-Through will be set by default", 1, "NAME"}, + {'x', "cache-line-size", "Set cache line size in kibibytes: {4,8,16,32,64}[KiB] (default: %d)", 1, "NUMBER", CLI_OPTION_DEFAULT_INT, 0, 0, ocf_cache_line_size_default / KiB}, + {0} +}; + +static int check_fs(const char* device) +{ + char cache_dev_path[MAX_STR_LEN]; + static const char fsck_cmd[] = "/sbin/fsck -n %s > /dev/null 2>&1"; + static const uint32_t size = MAX_STR_LEN + sizeof(fsck_cmd) + 1; + char buff[size]; + + if (get_dev_path(device, cache_dev_path, sizeof(cache_dev_path))) { + cas_printf(LOG_ERR, "Device does not exist\n"); + return FAILURE; + } + + snprintf(buff, sizeof(buff), fsck_cmd, cache_dev_path); + + if (!system(buff)) { + if (command_args_values.force) { + cas_printf(LOG_INFO, "A filesystem existed on %s. " + "Data may have been lost\n", + device); + } else { + /* file system on cache device */ + cas_printf(LOG_ERR, "A filesystem exists on %s. 
" + "Specify the --force option if you " + "wish to add the cache anyway.\n" + "Note: this may result in loss of data\n", + device); + return FAILURE; + } + } + + return SUCCESS; +} + +int handle_start() +{ + int cache_device = 0; + int status; + struct stat device_info; + + cache_device = open(command_args_values.cache_device, O_RDONLY); + + if (cache_device < 0) { + cas_printf(LOG_ERR, "Couldn't open cache device %s.\n", + command_args_values.cache_device); + return FAILURE; + } + + if (fstat(cache_device, &device_info)) { + close(cache_device); + cas_printf(LOG_ERR, "Could not stat target device:%s!\n", + command_args_values.cache_device); + return FAILURE; + } + + if (!S_ISBLK(device_info.st_mode)) { + close(cache_device); + cas_printf(LOG_ERR, WRONG_DEVICE_ERROR NOT_BLOCK_ERROR, + command_args_values.cache_device); + return FAILURE; + } + + if (check_fs(command_args_values.cache_device)) { + close(cache_device); + return FAILURE; + } + + if (close(cache_device) < 0) { + cas_printf(LOG_ERR, "Couldn't close the cache device.\n"); + return FAILURE; + } + + status = start_cache(command_args_values.cache_id, + command_args_values.state, + command_args_values.cache_device, + command_args_values.cache_mode, + command_args_values.eviction_policy_type, + command_args_values.line_size, + command_args_values.force); + + return status; +} + +static cli_option list_options[] = { + {'o', "output-format", "Output format: {table|csv}", 1, "FORMAT", 0}, + {0} +}; + +int handle_list() +{ + return list_caches(command_args_values.output_format); +} + +static cli_option stats_options[] = { + {'i', "cache-id", CACHE_ID_DESC, 1, "ID", CLI_OPTION_REQUIRED}, + {'j', "core-id", "Limit display of core-specific statistics to only ones pertaining to a specific core. 
If this option is not given, casadm will display statistics pertaining to all cores assigned to given cache instance.", 1, "ID", 0}, + {'d', "io-class-id", "Display per IO class statistics", 1, "ID", CLI_OPTION_OPTIONAL_ARG}, + {'f', "filter", "Apply filters from the following set: {all, conf, usage, req, blk, err}", 1, "FILTER-SPEC"}, + {'o', "output-format", "Output format: {table|csv}", 1, "FORMAT"}, + {0} +}; + +int stats_command_handle_option(char *opt, const char **arg) +{ + int stats_filters; + + if (!strcmp(opt, "cache-id")) { + if (validate_str_num(arg[0], "cache id", OCF_CACHE_ID_MIN, + OCF_CACHE_ID_MAX) == FAILURE) + return FAILURE; + + command_args_values.cache_id = atoi(arg[0]); + } else if (!strcmp(opt, "core-id")) { + if (validate_str_num(arg[0], "core id", 0, + OCF_CORE_ID_MAX) == FAILURE) + return FAILURE; + + command_args_values.core_id = atoi(arg[0]); + } else if (!strcmp(opt, "io-class-id")) { + if (NULL != arg[0]) { + if (validate_str_num(arg[0], "IO class id", + 0, OCF_IO_CLASS_ID_MAX) == FAILURE) + return FAILURE; + + command_args_values.io_class_id = atoi(arg[0]); + } + command_args_values.stats_filters |= STATS_FILTER_IOCLASS; + } else if (!strcmp(opt, "filter")) { + stats_filters = validate_str_stats_filters(arg[0]); + if (STATS_FILTER_INVALID == stats_filters) + return FAILURE; + stats_filters |= (command_args_values.stats_filters & STATS_FILTER_IOCLASS); + command_args_values.stats_filters = stats_filters; + } else if (!strcmp(opt, "output-format")) { + command_args_values.output_format = validate_str_output_format(arg[0]); + if (OUTPUT_FORMAT_INVALID == command_args_values.output_format) + return FAILURE; + } else { + return FAILURE; + } + + return 0; +} + +int handle_stats() +{ + return cache_status(command_args_values.cache_id, + command_args_values.core_id, + command_args_values.io_class_id, + command_args_values.stats_filters, + command_args_values.output_format); +} + +static cli_option stop_options[] = { + {'i', "cache-id", 
CACHE_ID_DESC, 1, "ID", CLI_OPTION_REQUIRED}, + {'n', "no-data-flush", "Do not flush dirty data (may be dangerous)"}, + {0} +}; + +int handle_stop() +{ + return stop_cache(command_args_values.cache_id, + command_args_values.flush_data); +} + +/***************************************************************************** + * GET/SET PARAM HELPERS * + *****************************************************************************/ + +#define SELECT_PARAM(_array, _index) ({ \ + _array[_index].select = true; \ +}) + +#define SELECT_CORE_PARAM(_index) \ + SELECT_PARAM(cas_core_params, _index) + +#define SELECT_CACHE_PARAM(_index) \ + SELECT_PARAM(cas_cache_params, _index) + +#define SET_PARAM(_array, _index, _value) ({ \ + SELECT_PARAM(_array, _index); \ + _array[_index].value = _value; \ + command_args_values.params_count++; \ +}) + +#define SET_CORE_PARAM(_index, _value) \ + SET_PARAM(cas_core_params, _index, _value) + +#define SET_CACHE_PARAM(_index, _value) \ + SET_PARAM(cas_cache_params, _index, _value) + +#define CORE_PARAMS_NS_BEGIN(_name, _desc) { \ + .name = _name, \ + .desc = _desc, \ + .options = { \ + {'i', "cache-id", CACHE_ID_DESC, 1, "ID", CLI_OPTION_REQUIRED}, \ + {'j', "core-id", CORE_ID_DESC, 1, "ID"}, + +#define CORE_PARAMS_NS_END() \ + {0}, \ + },\ +}, + +#define GET_CORE_PARAMS_NS(_name, _desc) { \ + .name = _name, \ + .desc = _desc, \ + .options = { \ + {'i', "cache-id", CACHE_ID_DESC, 1, "ID", CLI_OPTION_REQUIRED}, \ + {'j', "core-id", CORE_ID_DESC, 1, "ID", CLI_OPTION_REQUIRED}, \ + {'o', "output-format", "Output format: {table|csv}", 1, "FORMAT"}, \ + CORE_PARAMS_NS_END() + +#define CACHE_PARAMS_NS_BEGIN(_name, _desc) { \ + .name = _name, \ + .desc = _desc, \ + .options = { \ + {'i', "cache-id", CACHE_ID_DESC, 1, "ID", CLI_OPTION_REQUIRED}, \ + +#define CACHE_PARAMS_NS_END() \ + {0}, \ + },\ +}, + +#define GET_CACHE_PARAMS_NS(_name, _desc) \ + CACHE_PARAMS_NS_BEGIN(_name, _desc) \ + {'o', "output-format", "Output format: {table|csv}", 1, 
"FORMAT"}, \ + CACHE_PARAMS_NS_END() + + +static int core_param_handle_option_generic(char *opt, const char **arg, int (*handler)(char *opt, const char **arg)) +{ + command_args_values.params_type = PARAM_TYPE_CORE; + + if (!strcmp(opt, "cache-id")) { + if (validate_str_num(arg[0], "cache id", OCF_CACHE_ID_MIN, + OCF_CACHE_ID_MAX) == FAILURE) { + return FAILURE; + } + + command_args_values.cache_id = atoi(arg[0]); + } else if (!strcmp(opt, "core-id")) { + if (validate_str_num(arg[0], "core id", OCF_CORE_ID_MIN, + OCF_CORE_ID_MAX) == FAILURE) + return FAILURE; + + command_args_values.core_id = atoi(arg[0]); + } else { + return handler ? handler(opt, arg) : FAILURE; + } + + return SUCCESS; +} + +static int cache_param_handle_option_generic(char *opt, const char **arg, int (*handler)(char *opt, const char **arg)) +{ + command_args_values.params_type = PARAM_TYPE_CACHE; + + if (!strcmp(opt, "cache-id")) { + if (validate_str_num(arg[0], "cache id", OCF_CACHE_ID_MIN, + OCF_CACHE_ID_MAX) == FAILURE) { + return FAILURE; + } + + command_args_values.cache_id = atoi(arg[0]); + } else { + return handler ? 
handler(opt, arg) : FAILURE; + } + + return SUCCESS; +} + +/***************************************************************************** + * PARAMS DEFINITIONS * + *****************************************************************************/ + +uint32_t seq_cutoff_threshold_transform(uint32_t value) +{ + return value / KiB; +} + +static char *seq_cutoff_policy_values[] = { + [ocf_seq_cutoff_policy_always] = "always", + [ocf_seq_cutoff_policy_full] = "full", + [ocf_seq_cutoff_policy_never] = "never", + NULL, +}; + +static struct cas_param cas_core_params[] = { + /* Sequential cutoff params */ + [core_param_seq_cutoff_threshold] = { + .name = "Sequential cutoff threshold [KiB]" , + .transform_value = seq_cutoff_threshold_transform, + }, + [core_param_seq_cutoff_policy] = { + .name = "Sequential cutoff policy", + .value_names = seq_cutoff_policy_values, + }, + {0}, +}; + +static char *cleaning_policy_type_values[] = { + [ocf_cleaning_nop] = "nop", + [ocf_cleaning_alru] = "alru", + [ocf_cleaning_acp] = "acp", + NULL, +}; + +static struct cas_param cas_cache_params[] = { + /* Cleaning policy type */ + [cache_param_cleaning_policy_type] = { + .name = "Cleaning policy type" , + .value_names = cleaning_policy_type_values, + }, + + /* Cleaning policy ALRU params */ + [cache_param_cleaning_alru_wake_up_time] = { + .name = "Wake up time [s]" , + }, + [cache_param_cleaning_alru_stale_buffer_time] = { + .name = "Stale buffer time [s]" , + }, + [cache_param_cleaning_alru_flush_max_buffers] = { + .name = "Flush max buffers" , + }, + [cache_param_cleaning_alru_activity_threshold] = { + .name = "Activity threshold [ms]" , + }, + + /* Cleaning policy ACP params */ + [cache_param_cleaning_acp_wake_up_time] = { + .name = "Wake up time [ms]" , + }, + [cache_param_cleaning_acp_flush_max_buffers] = { + .name = "Flush max buffers" , + }, + {0}, +}; + +/***************************************************************************** + * SET PARAM NAMESPACE * + 
*****************************************************************************/ + +#define SEQ_CUT_OFF_THRESHOLD_DESC "Sequential cutoff activation threshold [KiB]" +#define SEQ_CUT_OFF_POLICY_DESC "Sequential cutoff policy. " \ + "Available policies: {always|full|never}" + +#define CLEANING_POLICY_TYPE_DESC "Cleaning policy type. " \ + "Available policy types: {nop|alru|acp}" + +#define CLEANING_ALRU_WAKE_UP_DESC "Period of time between awakenings of flushing thread <%d-%d>[s] (default: %d s)" +#define CLEANING_ALRU_STALENESS_TIME_DESC "Time that has to pass from the last write operation before a dirty cache" \ + " block can be scheduled to be flushed <%d-%d>[s] (default: %d s)" +#define CLEANING_ALRU_FLUSH_MAX_BUFFERS_DESC "Number of dirty cache blocks to be flushed in one cleaning cycle" \ + " <%d-%d> (default: %d)" +#define CLEANING_ALRU_ACTIVITY_THRESHOLD_DESC "Cache idle time before flushing thread can start <%d-%d>[ms]" \ + " (default: %d ms)" + +#define CLEANING_ACP_WAKE_UP_DESC "Time between ACP cleaning thread iterations <%d-%d>[ms] (default: %d ms)" +#define CLEANING_ACP_MAX_BUFFERS_DESC "Number of cache lines flushed in single ACP cleaning thread iteration" \ + " <%d-%d> (default: %d)" + +static cli_namespace set_param_namespace = { + .short_name = 'n', + .long_name = "name", + .entries = { + CORE_PARAMS_NS_BEGIN("seq-cutoff", "Sequential cutoff parameters") + {'t', "threshold", SEQ_CUT_OFF_THRESHOLD_DESC, 1, "KiB", 0}, + {'p', "policy", SEQ_CUT_OFF_POLICY_DESC, 1, "POLICY", 0}, + CORE_PARAMS_NS_END() + + CACHE_PARAMS_NS_BEGIN("cleaning", "Cleaning policy parameters") + {'p', "policy", CLEANING_POLICY_TYPE_DESC, 1, "POLICY", 0}, + CACHE_PARAMS_NS_END() + + CACHE_PARAMS_NS_BEGIN("cleaning-alru", "Cleaning policy ALRU parameters") + {'w', "wake-up", CLEANING_ALRU_WAKE_UP_DESC, 1, "NUMBER", + CLI_OPTION_RANGE_INT | CLI_OPTION_DEFAULT_INT, + OCF_ALRU_MIN_WAKE_UP, OCF_ALRU_MAX_WAKE_UP, + OCF_ALRU_DEFAULT_WAKE_UP}, + {'s', "staleness-time", 
CLEANING_ALRU_STALENESS_TIME_DESC, 1, "NUMBER", + CLI_OPTION_RANGE_INT | CLI_OPTION_DEFAULT_INT, + OCF_ALRU_MIN_STALENESS_TIME, OCF_ALRU_MAX_STALENESS_TIME, + OCF_ALRU_DEFAULT_STALENESS_TIME}, + {'b', "flush-max-buffers", CLEANING_ALRU_FLUSH_MAX_BUFFERS_DESC, 1, "NUMBER", + CLI_OPTION_RANGE_INT | CLI_OPTION_DEFAULT_INT, + OCF_ALRU_MIN_FLUSH_MAX_BUFFERS, OCF_ALRU_MAX_FLUSH_MAX_BUFFERS, + OCF_ALRU_DEFAULT_FLUSH_MAX_BUFFERS}, + {'t', "activity-threshold", CLEANING_ALRU_ACTIVITY_THRESHOLD_DESC, 1, "NUMBER", + CLI_OPTION_RANGE_INT | CLI_OPTION_DEFAULT_INT, + OCF_ALRU_MIN_ACTIVITY_THRESHOLD, OCF_ALRU_MAX_ACTIVITY_THRESHOLD, + OCF_ALRU_DEFAULT_ACTIVITY_THRESHOLD}, + CACHE_PARAMS_NS_END() + + CACHE_PARAMS_NS_BEGIN("cleaning-acp", "Cleaning policy ACP parameters") + {'w', "wake-up", CLEANING_ACP_WAKE_UP_DESC, 1, "NUMBER", + CLI_OPTION_RANGE_INT | CLI_OPTION_DEFAULT_INT, + OCF_ACP_MIN_WAKE_UP, OCF_ACP_MAX_WAKE_UP, + OCF_ACP_DEFAULT_WAKE_UP}, + {'b', "flush-max-buffers", CLEANING_ACP_MAX_BUFFERS_DESC, 1, "NUMBER", + CLI_OPTION_RANGE_INT | CLI_OPTION_DEFAULT_INT, + OCF_ACP_MIN_FLUSH_MAX_BUFFERS, OCF_ACP_MAX_FLUSH_MAX_BUFFERS, + OCF_ACP_DEFAULT_FLUSH_MAX_BUFFERS}, + CACHE_PARAMS_NS_END() + + {0}, + }, +}; + +int set_param_seq_cutoff_handle_option(char *opt, const char **arg) +{ + if (!strcmp(opt, "threshold")) { + if (validate_str_num(arg[0], "sequential cutoff threshold", 1, + 4194181) == FAILURE) + return FAILURE; + + SET_CORE_PARAM(core_param_seq_cutoff_threshold, atoi(arg[0]) * KiB); + } else if (!strcmp(opt, "policy")) { + if (!strcmp("always", arg[0])) { + SET_CORE_PARAM(core_param_seq_cutoff_policy, + ocf_seq_cutoff_policy_always); + } else if (!strcmp("full", arg[0])) { + SET_CORE_PARAM(core_param_seq_cutoff_policy, + ocf_seq_cutoff_policy_full); + } else if (!strcmp("never", arg[0])) { + SET_CORE_PARAM(core_param_seq_cutoff_policy, + ocf_seq_cutoff_policy_never); + } else { + cas_printf(LOG_ERR, "Error: Invalid policy name.\n"); + return FAILURE; + } + } else { + 
return FAILURE; + } + + return SUCCESS; +} + +int set_param_cleaning_handle_option(char *opt, const char **arg) +{ + if (!strcmp(opt, "policy")) { + if (!strcmp("nop", arg[0])) { + SET_CACHE_PARAM(cache_param_cleaning_policy_type, + ocf_cleaning_nop); + } else if (!strcmp("alru", arg[0])) { + SET_CACHE_PARAM(cache_param_cleaning_policy_type, + ocf_cleaning_alru); + } else if (!strcmp("acp", arg[0])) { + SET_CACHE_PARAM(cache_param_cleaning_policy_type, + ocf_cleaning_acp); + } else { + cas_printf(LOG_ERR, "Error: Invalid policy name.\n"); + return FAILURE; + } + } else { + return FAILURE; + } + + return SUCCESS; +} + +int set_param_cleaning_alru_handle_option(char *opt, const char **arg) +{ + if (!strcmp(opt, "wake-up")) { + if (validate_str_num(arg[0], "wake-up time", + OCF_ALRU_MIN_WAKE_UP, OCF_ALRU_MAX_WAKE_UP)) { + return FAILURE; + } + + SET_CACHE_PARAM(cache_param_cleaning_alru_wake_up_time, + strtoul(arg[0], NULL, 10)); + } else if (!strcmp(opt, "staleness-time")) { + if (validate_str_num(arg[0], "staleness time", + OCF_ALRU_MIN_STALENESS_TIME, OCF_ALRU_MAX_STALENESS_TIME)) { + return FAILURE; + } + + SET_CACHE_PARAM(cache_param_cleaning_alru_stale_buffer_time, + strtoul(arg[0], NULL, 10)); + } else if (!strcmp(opt, "flush-max-buffers")) { + if (validate_str_num(arg[0], "flush max buffers", + OCF_ALRU_MIN_FLUSH_MAX_BUFFERS, OCF_ALRU_MAX_FLUSH_MAX_BUFFERS)) { + return FAILURE; + } + + SET_CACHE_PARAM(cache_param_cleaning_alru_flush_max_buffers, + strtoul(arg[0], NULL, 10)); + } else if (!strcmp(opt, "activity-threshold")) { + if (validate_str_num(arg[0], "activity threshold", + OCF_ALRU_MIN_ACTIVITY_THRESHOLD, OCF_ALRU_MAX_ACTIVITY_THRESHOLD)) { + return FAILURE; + } + + SET_CACHE_PARAM(cache_param_cleaning_alru_activity_threshold, + strtoul(arg[0], NULL, 10)); + } else { + return FAILURE; + } + + return SUCCESS; +} + +int set_param_cleaning_acp_handle_option(char *opt, const char **arg) +{ + if (!strcmp(opt, "wake-up")) { + if (validate_str_num(arg[0], 
"wake-up time", + OCF_ACP_MIN_WAKE_UP, OCF_ACP_MAX_WAKE_UP)) { + return FAILURE; + } + + SET_CACHE_PARAM(cache_param_cleaning_acp_wake_up_time, + strtoul(arg[0], NULL, 10)); + } else if (!strcmp(opt, "flush-max-buffers")) { + if (validate_str_num(arg[0], "flush max buffers", + OCF_ACP_MIN_FLUSH_MAX_BUFFERS, OCF_ACP_MAX_FLUSH_MAX_BUFFERS)) { + return FAILURE; + } + + SET_CACHE_PARAM(cache_param_cleaning_acp_flush_max_buffers, + strtoul(arg[0], NULL, 10)); + } else { + return FAILURE; + } + + return SUCCESS; +} + +int set_param_namespace_handle_option(char *namespace, char *opt, const char **arg) +{ + if (!strcmp(namespace, "seq-cutoff")) { + return core_param_handle_option_generic(opt, arg, + set_param_seq_cutoff_handle_option); + } else if (!strcmp(namespace, "cleaning")) { + return cache_param_handle_option_generic(opt, arg, + set_param_cleaning_handle_option); + } else if (!strcmp(namespace, "cleaning-alru")) { + return cache_param_handle_option_generic(opt, arg, + set_param_cleaning_alru_handle_option); + } else if (!strcmp(namespace, "cleaning-acp")) { + return cache_param_handle_option_generic(opt, arg, + set_param_cleaning_acp_handle_option); + } else { + return FAILURE; + } +} + + +int handle_set_param() +{ + if (command_args_values.params_count == 0) { + cas_printf(LOG_ERR, "Error: No parameters specified!\n"); + return FAILURE; + } + + switch (command_args_values.params_type) { + case PARAM_TYPE_CORE: + return core_params_set(command_args_values.cache_id, + command_args_values.core_id, + cas_core_params); + case PARAM_TYPE_CACHE: + return cache_params_set(command_args_values.cache_id, + cas_cache_params); + default: + return FAILURE; + } +} + +/***************************************************************************** + * GET PARAM NAMESPACE * + *****************************************************************************/ + +static cli_namespace get_param_namespace = { + .short_name = 'n', + .long_name = "name", + .entries = { + 
GET_CORE_PARAMS_NS("seq-cutoff", "Sequential cutoff parameters") + GET_CACHE_PARAMS_NS("cleaning", "Cleaning policy parameters") + GET_CACHE_PARAMS_NS("cleaning-alru", "Cleaning policy ALRU parameters") + GET_CACHE_PARAMS_NS("cleaning-acp", "Cleaning policy ACP parameters") + + {0}, + }, +}; + +int get_param_handle_option(char *opt, const char **arg) +{ + if (!strcmp(opt, "output-format")) { + command_args_values.output_format = validate_str_output_format(arg[0]); + if (OUTPUT_FORMAT_INVALID == command_args_values.output_format) + return FAILURE; + } else { + return FAILURE; + } + + return SUCCESS; +} + +int get_param_namespace_handle_option(char *namespace, char *opt, const char **arg) +{ + if (!strcmp(namespace, "seq-cutoff")) { + SELECT_CORE_PARAM(core_param_seq_cutoff_threshold); + SELECT_CORE_PARAM(core_param_seq_cutoff_policy); + return core_param_handle_option_generic(opt, arg, + get_param_handle_option); + } else if (!strcmp(namespace, "cleaning")) { + SELECT_CACHE_PARAM(cache_param_cleaning_policy_type); + return cache_param_handle_option_generic(opt, arg, + get_param_handle_option); + } else if (!strcmp(namespace, "cleaning-alru")) { + SELECT_CACHE_PARAM(cache_param_cleaning_alru_wake_up_time); + SELECT_CACHE_PARAM(cache_param_cleaning_alru_stale_buffer_time); + SELECT_CACHE_PARAM(cache_param_cleaning_alru_flush_max_buffers); + SELECT_CACHE_PARAM(cache_param_cleaning_alru_activity_threshold); + return cache_param_handle_option_generic(opt, arg, + get_param_handle_option); + } else if (!strcmp(namespace, "cleaning-acp")) { + SELECT_CACHE_PARAM(cache_param_cleaning_acp_wake_up_time); + SELECT_CACHE_PARAM(cache_param_cleaning_acp_flush_max_buffers); + return cache_param_handle_option_generic(opt, arg, + get_param_handle_option); + } else { + return FAILURE; + } +} + +int handle_get_param() +{ + int format = TEXT; + + if (OUTPUT_FORMAT_CSV == command_args_values.output_format) { + format = RAW_CSV; + } + + switch (command_args_values.params_type) { + case 
PARAM_TYPE_CORE: + return core_params_get(command_args_values.cache_id, + command_args_values.core_id, + cas_core_params, format); + case PARAM_TYPE_CACHE: + return cache_params_get(command_args_values.cache_id, + cas_cache_params, format); + default: + return FAILURE; + } +} + +static cli_option set_state_cache_mode_options[] = { + {'c', "cache-mode", "Cache mode. Available cache modes: {"CAS_CLI_HELP_SET_CACHE_MODES"}", 1, "NAME", CLI_OPTION_REQUIRED}, + {'i', "cache-id", CACHE_ID_DESC, 1, "ID", CLI_OPTION_REQUIRED}, + {'f', "flush-cache", "Flush all dirty data from cache before switching to new mode. Option is required when switching from Write-Back mode", 1, "yes|no",0}, + {0}, +}; + +int set_cache_mode_command_handle_option(char *opt, const char **arg) +{ + if (!strcmp(opt, "cache-mode")) { + command_args_values.cache_mode = + validate_str_cache_mode((const char*)arg[0]); + + if (command_args_values.cache_mode < 0) + return FAILURE; + } else if (!strcmp(opt, "cache-id")) { + if (validate_str_num(arg[0], "cache id", OCF_CACHE_ID_MIN, + OCF_CACHE_ID_MAX) == FAILURE) + return FAILURE; + + command_args_values.cache_id = atoi(arg[0]); + } else if (!strcmp(opt, "flush-cache")) { + if (!strcmp("yes", arg[0])) + command_args_values.cache_state_flush = YES; + else if (!strcmp("no", arg[0])) + command_args_values.cache_state_flush = NO; + else { + cas_printf(LOG_ERR, "Error: 'yes' or 'no' required as an argument for -f option.\n"); + return FAILURE; + } + } else { + return FAILURE; + } + + return 0; +} + +int handle_set_cache_mode() +{ + return set_cache_mode(command_args_values.cache_mode, + command_args_values.cache_id, + command_args_values.cache_state_flush); +} + +static cli_option add_options[] = { + {'i', "cache-id", CACHE_ID_DESC, 1, "ID", CLI_OPTION_REQUIRED}, + {'j', "core-id", CORE_ID_DESC, 1, "ID", 0}, + {'d', "core-device", CORE_DEVICE_DESC, 1, "DEVICE", CLI_OPTION_REQUIRED}, + {0} +}; + +int handle_add() +{ + return add_core(command_args_values.cache_id, + 
command_args_values.core_id, + command_args_values.core_device, + false, false); +} + +static cli_option remove_options[] = { + {'i', "cache-id", CACHE_ID_DESC, 1, "ID", CLI_OPTION_REQUIRED}, + {'j', "core-id", CORE_ID_DESC, 1, "ID", CLI_OPTION_REQUIRED}, + {'f', "force", "Force remove inactive core"}, + {0} +}; + +int handle_remove() +{ + return remove_core(command_args_values.cache_id, + command_args_values.core_id, + false, + command_args_values.force); +} + +static cli_option core_pool_remove_options[] = { + {'d', "device", CORE_DEVICE_DESC, 1, "DEVICE", CLI_OPTION_REQUIRED}, + {0} +}; + +int handle_core_pool_remove() +{ + return core_pool_remove(command_args_values.core_device); +} + +#define RESET_COUNTERS_CORE_ID_DESC "Identifier of core <0-"xstr(_CASADM_CORE_ID_MAX) \ + "> within given cache instance. If not specified, statistics are reset " \ + "for all cores in cache instance." + +static cli_option reset_counters_options[] = { + {'i', "cache-id", CACHE_ID_DESC, 1, "ID", CLI_OPTION_REQUIRED}, + {'j', "core-id", RESET_COUNTERS_CORE_ID_DESC, 1, "ID", 0}, + {0} +}; + +int handle_reset_counters() +{ + return reset_counters(command_args_values.cache_id, + command_args_values.core_id); +} + +static cli_option flush_core_options[] = { + {'i', "cache-id", CACHE_ID_DESC, 1, "ID", CLI_OPTION_REQUIRED}, + {'j', "core-id", CORE_ID_DESC, 1, "ID", CLI_OPTION_REQUIRED}, + {0} +}; + +int handle_flush_core() +{ + return flush_core(command_args_values.cache_id, + command_args_values.core_id); +} + +static cli_option flush_cache_options[] = { + {'i', "cache-id", CACHE_ID_DESC, 1, "ID", CLI_OPTION_REQUIRED}, + {0} +}; + +int handle_flush_cache() +{ + return flush_cache(command_args_values.cache_id); +} + +/******************************************************************************* + * IO Classes Commands + ******************************************************************************/ + +enum { + io_class_opt_subcmd_configure = 0, + io_class_opt_subcmd_list, + + 
io_class_opt_cache_id, + io_class_opt_cache_file_load, + io_class_opt_output_format, + + io_class_opt_io_class_id, + io_class_opt_prio, + io_class_opt_min_size, + io_class_opt_max_size, + io_class_opt_name, + io_class_opt_cache_mode, + + io_class_opt_flag_required, + io_class_opt_flag_set, + + io_class_opt_subcmd_unknown, +}; + +/* IO class command options */ +static cli_option io_class_params_options[] = { + [io_class_opt_subcmd_configure] = { + .short_name = 'C', + .long_name = "load-config", + .desc = "Loads configuration for IO classes", + .args_count = 0, + .arg = NULL, + .priv = 0, + .flags = CLI_OPTION_DEFAULT_INT + }, + [io_class_opt_subcmd_list] = { + .short_name = 'L', + .long_name = "list", + .desc = "Lists currently configured IO classes", + .args_count = 0, + .arg = NULL, + .priv = 0, + .flags = CLI_OPTION_DEFAULT_INT, + }, + [io_class_opt_cache_id] = { + .short_name = 'i', + .long_name = "cache-id", + .desc = CACHE_ID_DESC, + .args_count = 1, + .arg = "ID", + .priv = (1 << io_class_opt_subcmd_configure) + | (1 << io_class_opt_subcmd_list) + | (1 << io_class_opt_flag_required), + .flags = CLI_OPTION_RANGE_INT, + .max_value = 0, + .min_value = OCF_CACHE_ID_MAX, + }, + [io_class_opt_cache_file_load] = { + .short_name = 'f', + .long_name = "file", + .desc = "Configuration file containing IO class definition", + .args_count = 1, + .arg = "FILE", + .priv = (1 << io_class_opt_subcmd_configure) + | (1 << io_class_opt_flag_required) + }, + [io_class_opt_output_format] = { + .short_name = 'o', + .long_name = "output-format", + .desc = "Output format: {table|csv}", + .args_count = 1, + .arg = "FORMAT", + .priv = (1 << io_class_opt_subcmd_list) + }, + + [io_class_opt_io_class_id] = { + .short_name = 'd', + .long_name = "io-class-id", + .desc = "IO class ID", + .args_count = 1, + .arg = "ID", + .priv = (1 << io_class_opt_flag_required), + }, + [io_class_opt_prio] = { + .short_name = 'p', + .long_name = "priority", + .desc = "IO class priority", + .args_count = 1, 
+ .arg = xstr(OCF_IO_CLASS_PRIO_HIGHEST)"-"xstr(OCF_IO_CLASS_PRIO_LOWEST), + .flags = CLI_OPTION_RANGE_INT, + .min_value = OCF_IO_CLASS_PRIO_HIGHEST, + .max_value = OCF_IO_CLASS_PRIO_LOWEST, + }, + [io_class_opt_min_size] = { + .short_name = 'm', + .long_name = "min-size", + .desc = "Guaranteed size of cache space for this IO class", + .args_count = 1, + .arg = "SIZE", + }, + [io_class_opt_max_size] = { + .short_name = 'x', + .long_name = "max-size", + .desc = "Maximum size of cache space for this IO class", + .args_count = 1, + .arg = "SIZE", + }, + [io_class_opt_name] = { + .short_name = 'n', + .long_name = "name", + .desc = "Optional textual name for this IO class", + .args_count = 1, + .arg = "NAME", + }, + [io_class_opt_cache_mode] = { + .short_name = 'c', + .long_name = "cache-mode", + .desc = "Overwrite cache mode for this IO class from available: {"CAS_CLI_HELP_START_CACHE_MODES"}", + .args_count = 1, + .arg = "NAME", + }, + + {0} +}; + +struct { + int subcmd; + int cache_id; + int io_class_id; + int cache_mode; + int io_class_prio; + int output_format; + uint32_t min; + uint32_t max; + char file[MAX_STR_LEN]; + char name[OCF_IO_CLASS_NAME_MAX]; +} static io_class_params = { + .subcmd = io_class_opt_subcmd_unknown, + .cache_id = 0, + .file = "", + .output_format = OUTPUT_FORMAT_DEFAULT +}; + +/* Filler to print sub-commands */ +int io_class_print_subcmd(cli_option* options, int flag) +{ + return (0 == (options->flags & CLI_OPTION_DEFAULT_INT)) ? 0 : 1; +} + +/* Filler to print parameters of given sub-command */ +int io_class_print_param(cli_option* options, int flag) +{ + return (flag == (options->priv & flag)) ? 
1 : 0; +} + +static inline void io_class_print_invalid_subcmd(void) +{ + cas_printf(LOG_ERR, "Invalid or missing first sub-command parameter "); + cas_printf(LOG_ERR, "Expected one of the: {"); + print_options_usage(io_class_params_options, "|", io_class_print_subcmd, 0); + cas_printf(LOG_INFO, "}\n"); +} + +/* Parser of option for IO class command */ +int io_class_handle_option(char *opt, const char **arg) +{ + if (io_class_opt_subcmd_unknown == io_class_params.subcmd) { + /* First parameters which defines sub-command */ + if (!strcmp(opt, "load-config")) { + io_class_params.subcmd = io_class_opt_subcmd_configure; + return 0; + } else if (!strcmp(opt, "list")) { + io_class_params.subcmd = io_class_opt_subcmd_list; + return 0; + } + } + + if (!strcmp(opt, "cache-id")) { + if (command_handle_option(opt, arg)) + return FAILURE; + + io_class_params_options[io_class_opt_cache_id].priv |= (1 << io_class_opt_flag_set); + io_class_params.cache_id = command_args_values.cache_id; + } else if (!strcmp(opt, "file")) { + if (validate_path(arg[0], 0)) + return FAILURE; + + io_class_params_options[io_class_opt_cache_file_load].priv |= (1 << io_class_opt_flag_set); + + strncpy_s(io_class_params.file, sizeof(io_class_params.file), arg[0], strnlen_s(arg[0], sizeof(io_class_params.file))); + } else if (!strcmp(opt, "output-format")) { + io_class_params.output_format = validate_str_output_format(arg[0]); + if (OUTPUT_FORMAT_INVALID == io_class_params.output_format) + return FAILURE; + + io_class_params_options[io_class_opt_output_format].priv |= (1 << io_class_opt_flag_set); + } + + return 0; +} + +/* Check if all required command were set depending on command type */ +int io_class_is_missing() { + int result = 0; + int mask; + cli_option* iter = io_class_params_options; + + for (;iter->long_name; iter++) { + char option_name[MAX_STR_LEN]; + if (iter->flags & CLI_OPTION_DEFAULT_INT) { + continue; + } + + command_name_in_brackets(option_name, MAX_STR_LEN, iter->short_name, 
iter->long_name); + + if (iter->priv & (1 << io_class_opt_flag_set)) { + /* Option is set, check if this option is allowed */ + mask = (1 << io_class_params.subcmd); + if (0 == (mask & iter->priv)) { + cas_printf(LOG_INFO, "Option '%s' is not allowed\n", option_name); + result = -1; + } + + } else { + /* Option is missing, check if it is required for this sub-command*/ + mask = (1 << io_class_params.subcmd) | (1 << io_class_opt_flag_required); + if (mask == (iter->priv & mask)) { + cas_printf(LOG_INFO, "Option '%s' is missing\n", option_name); + result = -1; + } + } + } + + return result; +} + +/* Command handler */ +int io_class_handle() { + /* Check if sub-command was specified */ + if (io_class_opt_subcmd_unknown == io_class_params.subcmd) { + io_class_print_invalid_subcmd(); + return FAILURE; + } + + /* Check if all required options are set */ + if (io_class_is_missing()) { + return FAILURE; + } + + switch (io_class_params.subcmd) { + case io_class_opt_subcmd_configure: + return partition_setup(io_class_params.cache_id, + io_class_params.file); + case io_class_opt_subcmd_list: + return partition_list(io_class_params.cache_id, + io_class_params.output_format); + } + + return FAILURE; +} + +/******************************************************************************* + * Script Commands + ******************************************************************************/ +enum { + script_cmd_unknown = -1, + + script_cmd_min_id = 0, + + script_cmd_upgrade = script_cmd_min_id, + script_cmd_check_cache_device, + + script_cmd_add_core, + script_cmd_remove_core, + + script_cmd_max_id, + + script_opt_min_id = script_cmd_max_id, + + script_opt_cache_device = script_opt_min_id, + script_opt_cache_id, + script_opt_core_id, + script_opt_core_device, + script_opt_try_add, + script_opt_update_path, + script_opt_detach, + script_opt_no_flush, + + script_opt_max_id, + + script_opt_flag_set, +}; + +/* + * Field .priv in script_cmd_* elements contains id of required options, + * 
script_opt_* .priv fields contains id of commands, where they can be used + */ +static cli_option script_params_options[] = { + [script_cmd_upgrade] = { + .short_name = 0, + .long_name = "upgrade-in-flight", + .args_count = 0, + .arg = NULL, + .priv = 0, + .flags = CLI_COMMAND_HIDDEN, + }, + [script_cmd_check_cache_device] = { + .short_name = 0, + .long_name = "check-cache-device", + .args_count = 0, + .arg = NULL, + .priv = (1 << script_opt_cache_device), + .flags = CLI_COMMAND_HIDDEN, + }, + [script_cmd_add_core] = { + .short_name = 0, + .long_name = "add-core", + .args_count = 0, + .arg = NULL, + .priv = (1 << script_opt_core_device) + | (1 << script_opt_cache_id), + .flags = CLI_COMMAND_HIDDEN, + }, + [script_cmd_remove_core] = { + .short_name = 0, + .long_name = "remove-core", + .args_count = 0, + .arg = NULL, + .priv = (1 << script_opt_cache_id) + | (1 << script_opt_core_id), + .flags = CLI_COMMAND_HIDDEN, + }, + [script_opt_cache_device] = { + .short_name = 0, + .long_name = "cache-device", + .args_count = 1, + .arg = "DEVICE", + .priv = (1 << script_cmd_check_cache_device), + .flags = CLI_OPTION_HIDDEN, + }, + [script_opt_cache_id] = { + .short_name = 0, + .long_name = "cache-id", + .args_count = 1, + .arg = "ID", + .priv = (1 << script_cmd_remove_core) + | (1 << script_cmd_add_core), + .flags = (CLI_OPTION_RANGE_INT | CLI_OPTION_HIDDEN), + .min_value = OCF_CACHE_ID_MIN, + .max_value = OCF_CACHE_ID_MAX, + }, + [script_opt_core_id] = { + .short_name = 0, + .long_name = "core-id", + .args_count = 1, + .arg = "ID", + .priv = (1 << script_cmd_remove_core) + | (1 << script_cmd_add_core), + .flags = (CLI_OPTION_RANGE_INT | CLI_OPTION_HIDDEN), + .min_value = OCF_CORE_ID_MIN, + .max_value = OCF_CORE_ID_MAX, + }, + [script_opt_core_device] = { + .short_name = 0, + .long_name = "core-device", + .args_count = 1, + .arg = "DEVICE", + .priv = (1 << script_cmd_add_core), + .flags = CLI_OPTION_HIDDEN, + }, + [script_opt_try_add] = { + .short_name = 0, + .long_name = 
"try-add", + .args_count = 0, + .arg = NULL, + .priv = (1 << script_cmd_add_core), + .flags = CLI_OPTION_HIDDEN, + }, + [script_opt_update_path] = { + .short_name = 0, + .long_name = "update-path", + .args_count = 0, + .arg = NULL, + .priv = (1 << script_cmd_add_core), + .flags = CLI_OPTION_HIDDEN, + }, + [script_opt_detach] = { + .short_name = 0, + .long_name = "detach", + .args_count = 0, + .arg = NULL, + .priv = (1 << script_cmd_remove_core), + .flags = CLI_OPTION_HIDDEN, + }, + [script_opt_no_flush] = { + .short_name = 0, + .long_name = "no-flush", + .args_count = 0, + .arg = NULL, + .priv = (1 << script_cmd_remove_core), + .flags = CLI_OPTION_HIDDEN, + }, + + {0} +}; + +int script_handle_option(char *opt, const char **arg) +{ + int id; + if (script_cmd_unknown == command_args_values.script_subcmd) { + for (id = script_cmd_min_id; id < script_cmd_max_id; id++) { + if (!strcmp(opt, script_params_options[id].long_name)) { + command_args_values.script_subcmd = id; + return SUCCESS; + } + } + return FAILURE; + } + + for (id = script_opt_min_id; id < script_opt_max_id; id++) { + if (!strcmp(opt, script_params_options[id].long_name)) { + if (command_handle_option(opt, arg) == FAILURE) + return FAILURE; + + script_params_options[id].priv |= (1 << script_opt_flag_set); + + return SUCCESS; + } + } + + return FAILURE; +} + +int is_option_allowed(int option_id) { + cli_option option = script_params_options[option_id]; + int commands_compatible_with_option = option.priv; + int selected_command = command_args_values.script_subcmd; + int command_flag = 1 << selected_command; + int option_is_allowed = command_flag & commands_compatible_with_option; + + return option_is_allowed; +} + +int is_option_required(int option_id) { + int option_flag = (1 << option_id); + int selected_command = command_args_values.script_subcmd; + int command_required_options = script_params_options[selected_command].priv; + int option_is_required = command_required_options & option_flag; + + return 
option_is_required; +} + +int script_command_is_valid() { + int result = SUCCESS; + int option_id; + cli_option* option = &script_params_options[script_opt_min_id]; + + for (option_id = script_opt_min_id; option_id < script_opt_max_id; option++, option_id++) { + char option_name[MAX_STR_LEN]; + int option_is_set = option->priv & (1 << script_opt_flag_set); + int option_has_default_value = option->flags & CLI_OPTION_DEFAULT_INT; + + if (option_has_default_value) + continue; + + command_name_in_brackets(option_name, MAX_STR_LEN, option->short_name, option->long_name); + + if (option_is_set) { + if (!is_option_allowed(option_id)) { + cas_printf(LOG_INFO, "Option '%s' is not allowed\n", option_name); + result = FAILURE; + } + } else { + if (is_option_required(option_id)) { + cas_printf(LOG_INFO, "Option '%s' is missing\n", option_name); + result = FAILURE; + } + } + } + + return result; +} + +int script_handle() { + if (script_cmd_unknown == command_args_values.script_subcmd) { + cas_printf(LOG_ERR, "Invalid or missing first sub-command parameter\n"); + return FAILURE; + } + + if (script_command_is_valid() == FAILURE) { + return FAILURE; + } + + switch (command_args_values.script_subcmd) { + case script_cmd_check_cache_device: + return check_cache_device(command_args_values.cache_device); + case script_cmd_upgrade: + return upgrade_start(); + case script_cmd_add_core: + return add_core( + command_args_values.cache_id, + command_args_values.core_id, + command_args_values.core_device, + command_args_values.try_add, + command_args_values.update_path + ); + case script_cmd_remove_core: + return remove_core( + command_args_values.cache_id, + command_args_values.core_id, + command_args_values.detach, + command_args_values.no_flush + ); + } + + return FAILURE; +} + +/******************************************************************************* + * NVMe Commands + ******************************************************************************/ + +enum { + 
nvme_opt_subcmd_format = 0, + + nvme_opt_device, + nvme_opt_force, + + nvme_opt_flag_required, + nvme_opt_flag_set, + + nvme_opt_subcmd_unknown, +}; + +/* NVMe command options */ +static cli_option nvme_options[] = { + [nvme_opt_subcmd_format] = { + .short_name = 'F', + .long_name = "format", + .desc = "Change NVMe metadata mode {normal|atomic} WARNING: Reboot required!", + .args_count = 1, + .arg = "MODE", + .flags = CLI_OPTION_REQUIRED, + }, + [nvme_opt_device] = { + .short_name = 'd', + .long_name = "device", + .desc = "NVMe device to be formatted", + .args_count = 1, + .arg = "DEVICE", + .flags = CLI_OPTION_REQUIRED, + }, + [nvme_opt_force] = { + .short_name = 'f', + .long_name = "force", + .desc = "Force NVMe format", + .args_count = 0, + .arg = NULL, + .flags = CLI_OPTION_OPTIONAL_ARG, + }, + {0} +}; + + +struct { + const char *device; + int metadata_mode; + int force; +} static nvme_params = { + .device = "", + .metadata_mode = 0, + .force = 0, +}; + + +/* Parser of option for IO class command */ +int nvme_handle_option(char *opt, const char **arg) +{ + if (!strcmp(opt, "device")) { + nvme_params.device = arg[0]; + } else if (!strcmp(opt, "format")) { + nvme_params.metadata_mode = validate_str_metadata_mode(arg[0]); + + if (METADATA_MODE_INVALID == nvme_params.metadata_mode) + return FAILURE; + } else if (!strcmp(opt, "force")) { + nvme_params.force = 1; + } else { + return FAILURE; + } + + return 0; +} + +static int handle_nvme_format() +{ + struct kcas_capabilites cas_capabilites; + static const char fsck_cmd[] = "/sbin/fsck -n %s > /dev/null 2>&1"; + static const uint32_t size = MAX_STR_LEN + sizeof(fsck_cmd) + 1; + char nvme_dev_path[MAX_STR_LEN]; + char buff[size]; + + if (get_cas_capabilites(&cas_capabilites)) { + cas_printf(LOG_ERR, "Can't obtain CAS capabilities\n"); + return FAILURE; + } + + if (!cas_capabilites.nvme_format) { + cas_printf(LOG_ERR, "Command is not supported\n"); + return FAILURE; + } + + if (get_dev_path(nvme_params.device, 
nvme_dev_path, + sizeof(nvme_dev_path))) { + cas_printf(LOG_ERR, "Device does not exist\n"); + return FAILURE; + } + + snprintf(buff, sizeof(buff), fsck_cmd, nvme_dev_path); + + if (!system(buff)) { + if (nvme_params.force) { + cas_printf(LOG_INFO, "A filesystem existed on %s. " + "Data may have been lost\n", + nvme_params.device); + } else { + /* file system on cache device */ + cas_printf(LOG_ERR, "A filesystem exists on %s. " + "Specify the --force option if you " + "wish to format the device anyway.\n" + "Note: this may result in loss of data\n", + nvme_params.device); + return FAILURE; + } + } + + return nvme_format(nvme_dev_path, nvme_params.metadata_mode, + nvme_params.force); +} + +static cli_option version_options[] = { + { + .short_name = 'o', + .long_name = "output-format", + .desc = "Output format: {table|csv}", + .args_count = 1, + .arg = "FORMAT", + }, + {0} +}; + +int version_handle_option(char *opt, const char **arg) +{ + if (!strcmp(opt, "output-format")) { + command_args_values.output_format = validate_str_output_format(arg[0]); + if (OUTPUT_FORMAT_INVALID == command_args_values.output_format) + return FAILURE; + } else { + return FAILURE; + } + + return 0; +} + +static int handle_version(void) +{ + char buff[MAX_STR_LEN]; + + FILE *intermediate_file[2]; + if (create_pipe_pair(intermediate_file)) { + cas_printf(LOG_ERR,"Failed to create unidirectional pipe.\n"); + return FAILURE; + } + + fprintf(intermediate_file[1], TAG(TABLE_HEADER) "Name,Version\n"); + + fprintf(intermediate_file[1], TAG(TABLE_ROW) OCF_LOGO " Cache Kernel Module,"); + if (cas_module_version(buff, MAX_STR_LEN)) { + fprintf(intermediate_file[1], "Not Loaded\n"); + } else { + fprintf(intermediate_file[1], "%s\n", buff); + } + + fprintf(intermediate_file[1], TAG(TABLE_ROW) OCF_LOGO " Disk Kernel Module,"); + if (disk_module_version(buff, MAX_STR_LEN)) { + fprintf(intermediate_file[1], "Not Loaded\n"); + } else { + fprintf(intermediate_file[1], "%s\n", buff); + } + + 
fprintf(intermediate_file[1], TAG(TABLE_ROW) OCF_LOGO " CLI Utility,"); + fprintf(intermediate_file[1], "%s\n", buff); + + int format = TEXT; + if (OUTPUT_FORMAT_CSV == command_args_values.output_format) { + format = RAW_CSV; + } + + fclose(intermediate_file[1]); + stat_format_output(intermediate_file[0], stdout, format); + fclose(intermediate_file[0]); + + return SUCCESS; +} + +/* Print help for IO class command */ +void io_class_help(app *app_values, cli_command *cmd) +{ + int i, flag = 0, all_ops, printed_ops; + char option_name[MAX_STR_LEN]; + cli_option* iter = &(cmd->options[0]); + + struct kcas_capabilites caps; + if (get_cas_capabilites(&caps)) { + memset(&caps, 0, sizeof(caps)); + } + + + /* Print usage */ + cas_printf(LOG_INFO, "Usage: %s --%s {", app_values->name, cmd->name); + print_options_usage(cmd->options, "|", io_class_print_subcmd, 0); + cas_printf(LOG_INFO, "}\n\n"); + + print_command_header(app_values, cmd); + + for (;iter->long_name; iter++, flag++) { + if (0 == (iter->flags & CLI_OPTION_DEFAULT_INT)) { + continue; + } + + cas_printf(LOG_INFO, "\n"); + + cas_printf(LOG_INFO, "%s:\n", iter->desc); + + cas_printf(LOG_INFO, "Usage: %s --%s --%s ", app_values->name, + cmd->name, iter->long_name); + + all_ops = printed_ops = 0; + for (i = 0; cmd->options[i].long_name != NULL; i++) { + if (0 == cmd->options[i].priv) { + continue; + } + + if (1 == io_class_print_param(&cmd->options[i], (1 << flag))) { + all_ops++; + } else { + continue; + } + + if (1 == io_class_print_param(&cmd->options[i], (1 << io_class_opt_flag_required))) { + printed_ops++; + } + } + + print_options_usage(cmd->options, " ", io_class_print_param, + (1 << flag) | (1 << io_class_opt_flag_required)); + + if (all_ops != printed_ops) { + cas_printf(LOG_INFO, " [option...]"); + } + command_name_in_brackets(option_name, MAX_STR_LEN, iter->short_name, iter->long_name); + cas_printf(LOG_INFO, "\nOptions that are valid with %s are:\n", option_name); + + print_list_options(cmd->options, (1 
<< flag), io_class_print_param); + + cas_printf(LOG_INFO, "\n"); + } +} + +static int handle_help(); + +static cli_command cas_commands[] = { + { + .name = "start-cache", + .short_name = 'S', + .desc = "Start new cache instance or load using metadata", + .long_desc = NULL, + .options = start_options, + .command_handle_opts = start_cache_command_handle_option, + .handle = handle_start, + .flags = CLI_SU_REQUIRED, + .help = NULL, + }, + { + .name = "stop-cache", + .short_name = 'T', + .desc = "Stop cache instance", + .long_desc = NULL, + .options = stop_options, + .command_handle_opts = command_handle_option, + .handle = handle_stop, + .flags = CLI_SU_REQUIRED, + .help = NULL, + }, + { + .name = "set-param", + .short_name = 'X', + .desc = "Set various runtime parameters", + .long_desc = "Set various runtime parameters", + .namespace = &set_param_namespace, + .namespace_handle_opts = set_param_namespace_handle_option, + .handle = handle_set_param, + .flags = CLI_SU_REQUIRED, + .help = NULL, + }, + { + .name = "get-param", + .short_name = 'G', + .desc = "Get various runtime parameters", + .long_desc = "Get various runtime parameters", + .namespace = &get_param_namespace, + .namespace_handle_opts = get_param_namespace_handle_option, + .handle = handle_get_param, + .flags = CLI_SU_REQUIRED, + .help = NULL, + }, + { + .name = "set-cache-mode", + .short_name = 'Q', + .desc = "Set cache mode", + .long_desc = "Set cache mode", + .options = set_state_cache_mode_options, + .command_handle_opts = set_cache_mode_command_handle_option, + .handle = handle_set_cache_mode, + .flags = CLI_SU_REQUIRED, + .help = NULL, + }, + { + .name = "add-core", + .short_name = 'A', + .desc = "Add core device to cache instance", + .long_desc = NULL, + .options = add_options, + .command_handle_opts = command_handle_option, + .handle = handle_add, + .flags = CLI_SU_REQUIRED, + .help = NULL, + }, + { + .name = "remove-core", + .short_name = 'R', + .desc = "Remove core device from cache instance", + 
.long_desc = NULL, + .options = remove_options, + .command_handle_opts = remove_core_command_handle_option, + .handle = handle_remove, + .flags = CLI_SU_REQUIRED, + .help = NULL, + }, + { + .name = "remove-detached", + .desc = "Remove core device from core pool", + .long_desc = NULL, + .options = core_pool_remove_options, + .command_handle_opts = core_pool_remove_command_handle_option, + .handle = handle_core_pool_remove, + .flags = CLI_SU_REQUIRED, + .help = NULL, + }, + { + .name = "list-caches", + .short_name = 'L', + .desc = "List all cache instances and core devices", + .long_desc = NULL, + .options = list_options, + .command_handle_opts = command_handle_option, + .handle = handle_list, + .flags = CLI_SU_REQUIRED, + .help = NULL, + }, + { + .name = "stats", + .short_name = 'P', + .desc = "Print statistics for cache instance", + .long_desc = NULL, + .options = stats_options, + .command_handle_opts = stats_command_handle_option, + .handle = handle_stats, + .flags = CLI_SU_REQUIRED, + .help = NULL, + }, + { + .name = "reset-counters", + .short_name = 'Z', + .desc = "Reset cache statistics for core device within cache instance", + .long_desc = NULL, + .options = reset_counters_options, + .command_handle_opts = command_handle_option, + .handle = handle_reset_counters, + .flags = CLI_SU_REQUIRED, + .help = NULL, + }, + { + .name = "flush-cache", + .short_name = 'F', + .desc = "Flush all dirty data from the caching device to core devices", + .long_desc = NULL, + .options = flush_cache_options, + .command_handle_opts = command_handle_option, + .handle = handle_flush_cache, + .flags = CLI_SU_REQUIRED, + .help = NULL, + }, + { + .name = "flush-core", + .short_name = 'E', + .desc = "Flush dirty data of a given core from the caching device to this core device", + .long_desc = NULL, + .options = flush_core_options, + .command_handle_opts = command_handle_option, + .handle = handle_flush_core, + .flags = CLI_SU_REQUIRED, + .help = NULL, + }, + { + .name = "io-class", + 
.short_name = 'C', + .desc = "Manage IO classes", + .long_desc = NULL, + .options = io_class_params_options, + .command_handle_opts = io_class_handle_option, + .handle = io_class_handle, + .flags = CLI_SU_REQUIRED, + .help = io_class_help, + }, + { + .name = "nvme", + .short_name = 'N', + .desc = "Manage NVMe namespace", + .long_desc = NULL, + .options = nvme_options, + .command_handle_opts = nvme_handle_option, + .handle = handle_nvme_format, + .flags = CLI_SU_REQUIRED, + .help = NULL, + }, + { + .name = "version", + .short_name = 'V', + .desc = "Print " OCF_LOGO " version", + .long_desc = NULL, + .options = version_options, + .command_handle_opts = version_handle_option, + .handle = handle_version, + .flags = 0, + .help = NULL + }, + { + .name = "help", + .short_name = 'H', + .desc = "Print help", + .long_desc = NULL, + .options = NULL, + .command_handle_opts = NULL, + .flags = 0, + .handle = handle_help, + .help = NULL + }, + { + .name = "script", + .options = script_params_options, + .command_handle_opts = script_handle_option, + .flags = (CLI_COMMAND_HIDDEN | CLI_SU_REQUIRED), + .handle = script_handle, + }, + {0}, +}; + +#define MAN_PAGE "casadm" +#define HELP_FOOTER "" + +static int handle_help() +{ + app app_values; + app_values.name = MAN_PAGE; + app_values.info = " [option...]"; + app_values.title = HELP_HEADER; + app_values.doc = HELP_FOOTER; + app_values.man = MAN_PAGE; + app_values.block = 0; + + print_help(&app_values, cas_commands); + return 0; +} + +int main(int argc, const char *argv[]) +{ + int blocked = 0; + app app_values; + + set_default_sig_handler(); + set_safe_lib_constraint_handler(); + + app_values.name = argv[0]; + app_values.info = " [option...]"; + app_values.title = HELP_HEADER; + app_values.doc = HELP_FOOTER; + app_values.man = MAN_PAGE; + app_values.block = blocked; + + return args_parse(&app_values, cas_commands, argc, argv); +} diff --git a/casadm/csvparse.c b/casadm/csvparse.c new file mode 100644 index 000000000..623fd44e9 --- 
/dev/null +++ b/casadm/csvparse.c @@ -0,0 +1,483 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include +#include +#include +#include +#include +#include +#include +#include "csvparse.h" +#include "cas_lib_utils.h" +#include "safeclib/safe_lib.h" +#include + +#define SUCCESS 0 +#define FAILURE 1 + +struct CSVFILE_t { + FILE *f; /**< underlying byte stream*/ + int num_columns; /**< number of columns in recently read + line of CSV file */ + int alloc_column_ptrs; /**< number of pointers to columns + that can be fit in columns buffer */ + char **columns; /**< buffer contains exactly one pointer to each + column of a csv file */ + char *buffer; /**< buffer to which recently read line of a csv file + is stored */ + int buffer_size; /**< size of a buffer */ + + char csv_comment; /**< character markng whole line comment. if set to null, + comments in file are not respected */ + char csv_separator; /**< csv separator (by default coma, but in some csv formats + it is something different */ +}; + +#define DEF_ALLOC_COL_PTRS 2 +#define DEF_CSV_FILE_BUFFER_SIZE 20 + +/* return error when input dataset size exceeds some common sense limitations */ +#define MAX_NUM_COLUMNS 100 +#define MAX_LINE_LENGTH 8192 + +CSVFILE *csv_open(const char *path, const char *mode) +{ + CSVFILE *csv; + + if (!path || !mode) { + return NULL; + } + + /* open underlying file as a character stream */ + FILE *f = fopen(path, mode); + if (!f) { + return NULL; + } + + csv = csv_fopen(f); + if (NULL == csv) { + fclose(f); + return NULL; + } + + return csv; +} + +CSVFILE *csv_fopen(FILE *f) +{ + CSVFILE *cf = malloc(sizeof(*cf)); + if (!cf) { + return NULL; + } + /* allocate storage for columns of CSV file */ + cf->num_columns = 0; + cf->alloc_column_ptrs = DEF_ALLOC_COL_PTRS; + + cf->columns = malloc(cf->alloc_column_ptrs * sizeof(char *)); + if (!cf->columns) { + free(cf); + return NULL; + } + + /* allocate storage for line of CSV file */ + 
cf->buffer_size = DEF_CSV_FILE_BUFFER_SIZE;
	cf->buffer = malloc(cf->buffer_size);
	if (!cf->buffer) {
		free(cf->columns);
		free(cf);
		return NULL;
	}

	/* assign underlying file as a character stream */
	cf->f = f;

	cf->csv_separator = ',';
	cf->csv_comment = 0;	/* 0 disables comment handling entirely */

	return cf;
}

/**
 * Close a CSV file together with its underlying stdio stream.
 * Direct counterpart to csv_open().
 */
void csv_close(CSVFILE *cf)
{
	fclose(cf->f);
	csv_close_nu(cf);
}

/**
 * Free all parser state WITHOUT closing the underlying FILE*.
 * Counterpart to csv_fopen()/csv_fdopen(), where the stream is owned
 * by the caller.
 */
void csv_close_nu(CSVFILE *cf)
{
	free(cf->columns);
	free(cf->buffer);
	/* poison the handle so a use-after-close is easier to spot */
	memset(cf, 0, sizeof(*cf));
	free(cf);
}

/**
 * internal helper function for the library.
 *
 * Grow cf->columns (geometric doubling) so that it can hold at least
 * cf->num_columns + 1 pointers. Returns SUCCESS/FAILURE; on failure the
 * existing columns array is left intact (realloc result is checked
 * before assignment).
 */
static int ensure_items_array(CSVFILE *cf)
{
	if (cf->num_columns > MAX_NUM_COLUMNS) {
		return FAILURE;
	} else if (cf->num_columns < cf->alloc_column_ptrs) {
		/* still enough room - nothing to do */
		return SUCCESS;
	} else {
		char **tmp;
		cf->alloc_column_ptrs = cf->num_columns * 2;
		tmp =
		    realloc(cf->columns,
			    cf->alloc_column_ptrs * sizeof(char *));
		if (!tmp) {
			return FAILURE;
		} else {
			cf->columns = tmp;
			return SUCCESS;
		}
	}
}

/**
 * Function checks if CSV file is a valid one.
 *
 * "Valid" here means: non-NULL handle with a live stream and both
 * internal buffers allocated.
 */
bool csv_is_valid(CSVFILE *cf)
{
	if (!cf) {
		return false;
	} else if (!cf->f) {
		return false;
	} else if (!cf->columns) {
		return false;
	} else if (!cf->buffer) {
		return false;
	} else {
		return true;
	}
}

/**
 * Read one full text line into cf->buffer, growing the buffer
 * (geometric doubling) until the whole line fits or MAX_LINE_LENGTH is
 * exceeded. The trailing '\n', if present, is replaced with NUL.
 * Returns SUCCESS, or FAILURE on EOF/error/oversize line/OOM.
 */
static int csv_read_line(CSVFILE *cf)
{
	char *line;
	char *c;
	int i, len;
	int already_read = 0;
	/* fgets reads at most buffer_size-1 characters and always places NULL
	 * at the end.
	 */

	while (true) {
		/* on retry, continue reading right after the partial data;
		 * the previous pass left buffer_size-1 chars + NUL */
		line = fgets(cf->buffer + already_read,
			     cf->buffer_size - already_read, cf->f);
		if (!line) {
			return FAILURE;
		}
		line = cf->buffer;
		/* check that entire line was read; if failed, expand buffer and retry
		 * or (in case of eof) be happy with what we have */
		c = line;
		i = 0;

		while (*c && *c != '\n') {
			c++;
			i++;
		}
		len = i;
		if (len > MAX_LINE_LENGTH) {
			return FAILURE;
		}

		/* buffer ends with 0 while it is not an EOF - sign that we have NOT read entire line
		 * - try to expand buffer*/
		if (!*c && !feof(cf->f)) {
			already_read = cf->buffer_size - 1;
			cf->buffer_size *= 2;
			char *tmp = realloc(cf->buffer, cf->buffer_size);

			if (tmp) {
				cf->buffer = tmp;
				continue;
			} else {
				return FAILURE;
			}
		}

		/* strip the newline terminator, if any */
		if (cf->buffer[i] == '\n') {
			cf->buffer[i] = 0;
		}
		break;
	}
	return SUCCESS;
}

/**
 * Read and tokenize the next CSV record.
 *
 * Splits cf->buffer in place on cf->csv_separator, strips surrounding
 * whitespace outside quotes, honors double-quote quoting with Excel-style
 * "" escapes, and (optionally) truncates the line at cf->csv_comment.
 * After a successful call, cf->columns[0..num_columns-1] point into
 * cf->buffer. Returns SUCCESS or FAILURE (invalid handle, EOF, OOM).
 */
int csv_read(CSVFILE *cf)
{
	int i, j, spaces_at_end;
	bool parsing_token = false;	/* if false, "cursor" is over whitespace, otherwise
					 * it is over part of token */

	bool quotation = false;	/* true while inside "..." */
	if (!csv_is_valid(cf)) {
		return FAILURE;
	}
	if (csv_read_line(cf)) {
		return FAILURE;
	}

	i = 0;
	cf->num_columns = 0;
	cf->columns[0] = 0;
	spaces_at_end = 0;	/* length of the trailing whitespace run */

	while (cf->buffer[i]) {
		if (quotation) {	/* handling text within quotation marks -
					 * ignore commas in this kind of text and don't strip spaces */
			if (cf->buffer[i] == '"' && cf->buffer[i + 1] == '"') {
				/* double quotation mark is considered escaped quotation by
				 * Micros~1 Excel. We should do likewise */
				if (!parsing_token) {	/* start of a token */
					cf->columns[cf->num_columns] =
					    &cf->buffer[i];
					parsing_token = true;
				}
				++i;
				/* shift the token start right by one byte, squeezing
				 * the first of the two quote characters out */
				memmove_s(cf->columns[cf->num_columns] + 1,
					  cf->buffer_size - (cf->columns[cf->num_columns] - cf->buffer),
					  cf->columns[cf->num_columns],
					  &cf->buffer[i] - cf->columns[cf->num_columns]);
				cf->columns[cf->num_columns]++;
			} else if (cf->buffer[i] == '"') {
				/* closing quote ends the token */
				quotation = false;
				parsing_token = false;
				cf->buffer[i] = 0;
			} else if (!parsing_token) {	/* start of a token */
				cf->columns[cf->num_columns] = &cf->buffer[i];
				parsing_token = true;
			}
		} else {	/* handling text outside quotation mark */
			if (cf->buffer[i] == cf->csv_separator) {
				(cf->num_columns)++;
				if (ensure_items_array(cf)) {
					return FAILURE;
				}
				cf->columns[cf->num_columns] = 0;
				parsing_token = false;
				cf->buffer[i] = 0;
				/* erase trailing spaces of the finished token */
				for (j = i - spaces_at_end; j != i; ++j) {
					cf->buffer[j] = 0;
				}

			} else if (cf->buffer[i] == '"') {
				quotation = true;
				spaces_at_end = 0;
			} else if (cf->csv_comment
				   && cf->buffer[i] == cf->csv_comment) {
				/* rest of the line is a comment - stop here */
				cf->buffer[i] = 0;
				break;
			} else if (!isspace(cf->buffer[i])) {
				if (!parsing_token) {	/* start of a token */
					if (!cf->columns[cf->num_columns]) {
						cf->columns[cf->num_columns] =
						    &cf->buffer[i];
					}
					parsing_token = true;
				}
				spaces_at_end = 0;
			} else {	/* no token; count spaces so they can be stripped */
				parsing_token = false;
				spaces_at_end++;
			}
		}
		++i;
	}

	/* strip trailing whitespace of the last token */
	for (j = i - spaces_at_end; j != i; ++j) {
		cf->buffer[j] = 0;
	}

	/*always consider empty line to have exactly one empty column */
	cf->num_columns++;

	for (j = 0; j != cf->num_columns; ++j) {
		/* if no columns were detected during parse, make sure that columns[x]
		 * points to an empty string and not into (NULL) */
		if (!cf->columns[j]) {	/* so that empty columns will return empty string and
					   not a null-pointer */
			cf->columns[j] = &cf->buffer[i];
		}
	}

	return SUCCESS;
}
+unsigned int csv_count_cols(CSVFILE *line) +{ + return line->num_columns; +} + +int csv_empty_line(CSVFILE *cf) +{ + if (!csv_is_valid(cf)) { + return FAILURE; + } + if (0 == csv_count_cols(cf)) { + return 1; + } else if (1 == csv_count_cols(cf)) { + const char *value = csv_get_col(cf, 0); + if (strempty(value)) { + return 1; + } + } + + return 0; +} + +char *csv_get_col(CSVFILE *cf, int coln) +{ + if (!csv_is_valid(cf)) { + return NULL; + } + return cf->columns[coln]; +} + +char **csv_get_col_ptr(CSVFILE *cf) +{ + return cf->columns; +} + +void csv_seek_beg(CSVFILE *cf) +{ + fseek(cf->f, 0, SEEK_SET); +} + +int csv_feof(CSVFILE *cf) +{ + return feof(cf->f); +} + +int csv_print(const char *path) +{ + int i, j, k; /* column, line, row, within column */ + int num_col_lengths = DEF_ALLOC_COL_PTRS; + static const int def_col_len = 5; + int actual_num_cols = 1; + + CSVFILE *cf = csv_open(path, "r"); + if (!cf) { + return FAILURE; + } + + int *col_lengths = malloc(num_col_lengths * sizeof(int)); + if (!col_lengths) { + csv_close(cf); + return FAILURE; + } + + for (i = 0; i != num_col_lengths; ++i) { + col_lengths[i] = def_col_len; + } + + /*calculate length of each column */ + i = j = 0; + while (!csv_read(cf)) { + int num_cols = csv_count_cols(cf); + if (num_cols > actual_num_cols) { + actual_num_cols = num_cols; + } + + if (num_cols > num_col_lengths) { + /* CSV file happens to have more columns, than we have allocated + * memory for */ + int *tmp = + realloc(col_lengths, num_cols * 2 * sizeof(int)); + if (!tmp) { + free(col_lengths); + csv_close(cf); + return FAILURE; + } + /* reallocation successful */ + col_lengths = tmp; + for (i = num_col_lengths; i != num_cols * 2; ++i) { + col_lengths[i] = def_col_len; + } + num_col_lengths = num_cols * 2; + } + + for (i = 0; i != csv_count_cols(cf); ++i) { + int len = strnlen(csv_get_col(cf, i), MAX_STR_LEN); + if (col_lengths[i] < len) { + col_lengths[i] = len; + } + } + ++j; + } + + /*actually format pretty table */ + 
csv_seek_beg(cf); + printf(" | "); + + for (i = 0; i != actual_num_cols; ++i) { + int before = col_lengths[i] / 2; + + for (k = 0; k != before; ++k) { + putchar(' '); + } + putchar(i + 'A'); + for (k = 0; k != col_lengths[i] - before - 1; ++k) { + putchar(' '); + } + printf(" | "); + } + printf("\n-----|-"); + + for (i = 0; i != actual_num_cols; ++i) { + for (k = 0; k != col_lengths[i]; ++k) { + putchar('-'); + } + printf("-|-"); + } + printf("\n"); + + j = 1; + while (!csv_read(cf)) { + printf("%4d | ", j); + int num_cols = csv_count_cols(cf); + for (i = 0; i != actual_num_cols; ++i) { + if (i < num_cols) { + char *c = csv_get_col(cf, i); + for (k = 0; c[k]; k++) { + putchar(c[k]); + } + } else { + k = 0; + } + for (; k != col_lengths[i]; ++k) { + putchar(' '); + } + printf(" | "); + } + ++j; + putchar('\n'); + } + + free(col_lengths); + csv_close(cf); + return SUCCESS; +} + +#ifdef __CSV_SAMPLE__ +/** + * usage example for csvparse library + * gcc -ggdb csvparse.c -I../common -D__CSV_SAMPLE__ -ocsvsample + */ +int main() +{ + puts("Validated configurations to run Intel CAS"); + csv_print("../../tools/build_installer/utils/validated_configurations.csv"); + putchar('\n'); + + puts("IO Classes for Intel CAS"); + csv_print("../../tools/build_installer/utils/default_ioclasses.csv"); + putchar('\n'); + +} +#endif diff --git a/casadm/csvparse.h b/casadm/csvparse.h new file mode 100644 index 000000000..2de03a5da --- /dev/null +++ b/casadm/csvparse.h @@ -0,0 +1,103 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __CSVPARSE_H_ +#define __CSVPARSE_H_ +#include +/** + * @file + * @brief Generic CSV input/output library + * + */ + +/** + * data structure holding info about CSV file being read. + * @note there is no need to directly manipulate any field of this structure. + * Csvparse library handles everything. 
 */
struct CSVFILE_t;

/**
 * This is to mimic semantics of stdio FILE*, which also is a typedef for a structure.
 */
typedef struct CSVFILE_t CSVFILE;


CSVFILE *csv_open(const char *path, const char *mode);

CSVFILE *csv_fopen(FILE *f);
CSVFILE *csv_fdopen(int fd);


/**
 * close csv file. this is a direct counterpart to csv_open
 */
void csv_close(CSVFILE *cf);

/**
 * close a csv without closing underlying plain file object (so that all
 * structures allocated by the csv parser are freed but syscall close(2) isn't
 * issued) - this is designed as counterpart to csv_fopen or csv_fdopen
 */
void csv_close_nu(CSVFILE *cf);

/**
 * @param cf csv file handle to read
 *
 * Read line from CSV file; return 0 if line was successfully read.
 * Return nonzero if EOF or error was observed.
 * Error may mean end of file or e.g. memory allocation error for temporary buffers
 */
int csv_read(CSVFILE *cf);

/**
 * @return true if end of file occurred.
 */
int csv_feof(CSVFILE *cf);

/**
 * return number of columns
 * @return # of columns in the most recently read row of the csv file
 */
unsigned int csv_count_cols(CSVFILE *line);

/**
 * return given column of recently read row
 * @param coln - column number
 * @return pointer to field of csv file as a string; no range checking is performed,
 * so if coln given exceeds actual number of columns defined in this row, error will occur
 */
char *csv_get_col(CSVFILE *cf, int coln);

/**
 * return entire row as a set of pointers to individual columns (unchecked function
 * returns internal representation.
state is guaranteed to be correct only when + * csv_read returned success; + */ +char** csv_get_col_ptr(CSVFILE *cf); + +/** + * Check if current line is empty + * + * @param cf - CVS file instance + * @retval 1 - empty line + * @retval 0 - no empty line + */ +int csv_empty_line(CSVFILE *cf); + +/** + * Seek to the begining of CSV file; this allows reading file again, from the begining + */ +void csv_seek_beg(CSVFILE *cf); + +/** + * This function prints CVS file in human readable format to the STD output + * + * @param path - Path to the CVS file + * @return Operation status. 0 - Success, otherwise error during printing + */ +int csv_print(const char *path); + +#endif diff --git a/casadm/extended_err_msg.c b/casadm/extended_err_msg.c new file mode 100644 index 000000000..6774d6092 --- /dev/null +++ b/casadm/extended_err_msg.c @@ -0,0 +1,244 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include +#include +#include +#include +#include "safeclib/safe_lib.h" +#include +#include "extended_err_msg.h" + +struct { + int cas_error; + const char *msg; +} static cas_error_code_map[] = { + + /* IOC error mappings*/ + { + OCF_ERR_INVAL, + "Invalid input parameter" + }, + { + OCF_ERR_INVAL_VOLUME_TYPE, + "Invalid volume type" + }, + { + OCF_ERR_INTR, + "Interrupted by a signal" + }, + { + OCF_ERR_UNKNOWN, + "Unknown error occurred" + }, + { + OCF_ERR_TOO_MANY_CACHES, + "Too many caches" + }, + { + OCF_ERR_NO_MEM, + "Not enough memory to allocate a new cache device" + }, + { + OCF_ERR_NO_FREE_RAM, + "Not enough free RAM for cache metadata to start cache" + }, + { + OCF_ERR_START_CACHE_FAIL, + "Failed to insert cache" + }, + { + OCF_ERR_CACHE_IN_USE, + "At least one cas device is still in use" + }, + { + OCF_ERR_CACHE_NOT_EXIST, + "Cache ID does not exist" + }, + { + OCF_ERR_CACHE_EXIST, + "Cache ID already exists" + }, + { + OCF_ERR_TOO_MANY_CORES, + "Too many core devices in cache" + }, + { + OCF_ERR_CORE_NOT_AVAIL, + 
"Core device not available" + }, + { + OCF_ERR_CACHE_NOT_AVAIL, + "Cache device not available" + }, + { + OCF_ERR_IO_CLASS_NOT_EXIST, + "No such IO class ID in the cache" + }, + { + OCF_ERR_WRITE_CACHE, + "Error while writing to cache device" + }, + { + OCF_ERR_WRITE_CORE, + "Error while writing to core device" + }, + { + OCF_ERR_DIRTY_SHUTDOWN, + "Please use --load option to restore previous cache state " + "(Warning: data corruption may happen)\nOr initialize your " + "cache using --force option. Warning: All dirty data will be " + "lost!\n" + }, + { + OCF_ERR_DIRTY_EXISTS, + "Cache closed with dirty data.\nPlease start cache using " + "--load or --force option.\n" + }, + { + OCF_ERR_FLUSHING_INTERRUPTED, + "Flushing of core interrupted" + }, + { + OCF_ERR_CANNOT_ADD_CORE_TO_POOL, + "Error occurred during adding core device to core pool" + }, + { + OCF_ERR_CACHE_IN_INCOMPLETE_STATE, + "Cache is in incomplete state - at least one core is inactive" + }, + { + OCF_ERR_CORE_IN_INACTIVE_STATE, + "Core device is in inactive state" + }, + { + OCF_ERR_NOT_OPEN_EXC, + "Cannot open device exclusively" + }, + + /* CAS kernel error mappings*/ + { + KCAS_ERR_ROOT, + "Must be root" + }, + { + KCAS_ERR_SYSTEM, + "System Error" + }, + { + KCAS_ERR_BAD_RANGE, + "Range parameters are invalid" + }, + { + KCAS_ERR_DEV_SPACE, + "Illegal range, out of device space" + }, + { + KCAS_ERR_INV_IOCTL, + "Invalid ioctl" + }, + { + KCAS_ERR_DEV_PENDING, + "Device opens or mount are pending to this cache" + }, + { + KCAS_ERR_DIRTY_EXISTS_NVME, + "Cache device contains dirty data.\nIf you want to format it, " + "please use --force option.\nWarning: all data will be lost!" + }, + { + KCAS_ERR_FILE_EXISTS, + "Could not create exported object because file in /dev " + "directory exists" + }, + { + KCAS_ERR_IN_UPGRADE, + "Operation not allowed. 
CAS is in upgrade state" + }, + { + KCAS_ERR_UNALIGNED, + "Cache device logical sector size is greater than core device " + "logical sector size.\nConsider changing logical sector size " + "on current cache device \nor try other device with the same " + "logical sector size as core device." + }, + { + KCAS_ERR_NO_STORED_CONF, + "Internal kernel module error" }, + { + KCAS_ERR_ROLLBACK, + "Cannot restore previous configuration" + }, + { + KCAS_ERR_NOT_NVME, + "Given block device is not NVMe" + }, + { + KCAS_ERR_FORMAT_FAILED, + "Failed to format NVMe device" + }, + { + KCAS_ERR_NVME_BAD_FORMAT, + "NVMe is formatted to unsupported format" + }, + { + KCAS_ERR_CONTAINS_PART, + "Device contains partitions.\nIf you want to continue, " + "please use --force option.\nWarning: all data will be lost!" + }, + { + KCAS_ERR_A_PART, + "Formatting of partition is unsupported." + }, + { + KCAS_ERR_REMOVED_DIRTY, + "Flush error occured. Core has been set to detached state.\n" + "Warning: Core device may contain inconsistent data.\n" + "To access your data please add core back to the cache." + }, + { + KCAS_ERR_STOPPED_DIRTY, + "Cache has been stopped with flushing error.\n" + "Warning: Core devices may contain inconsistent data.\n" + "To access your data, please start cache with --load option." + }, + { + KCAS_ERR_NO_CACHE_ATTACHED, + "Operation not allowed. Caching device is not attached." + }, + { + KCAS_ERR_CORE_POOL_NOT_EMPTY, + "Operation not allowed. Core pool is not empty." 
+ }, + { + KCAS_ERR_CLS_RULE_UNKNOWN_CONDITION, + "Unexpected classification rule condition" + }, + { + KCAS_ERR_CLS_RULE_INVALID_SYNTAX, + "Invalid classification rule syntax" + }, + + +}; + +const char *cas_strerr(int cas_error_code) +{ + int i; + int count = sizeof(cas_error_code_map) / sizeof(cas_error_code_map[0]); + + if (cas_error_code == 0) + return NULL; /* No Error */ + + cas_error_code = abs(cas_error_code); + + for (i = 0; i < count; i++) { + if (cas_error_code_map[i].cas_error == cas_error_code) + return cas_error_code_map[i].msg; + } + + return strerror(cas_error_code); +} + diff --git a/casadm/extended_err_msg.h b/casadm/extended_err_msg.h new file mode 100644 index 000000000..f71de21bb --- /dev/null +++ b/casadm/extended_err_msg.h @@ -0,0 +1,6 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +const char *cas_strerr(int cas_error_code); diff --git a/casadm/intvector.c b/casadm/intvector.c new file mode 100644 index 000000000..fe7bb1f4b --- /dev/null +++ b/casadm/intvector.c @@ -0,0 +1,110 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include +#include +#include "intvector.h" + +#define DEFAULT_CAPACITY 11 + + +struct intvector *vector_alloc() +{ + struct intvector *v = malloc(sizeof(struct intvector)); + if (!v) { + return 0; + } + if (vector_alloc_placement(v)) { + free(v); + return 0; + } + return v; +} + +int vector_alloc_placement(struct intvector *v) +{ + v->content = malloc(sizeof(int) * DEFAULT_CAPACITY); + if (!v->content) { + return 1; + } + v->size = 0; + v->capacity = DEFAULT_CAPACITY; + return 0; +} + +int vector_reserve(struct intvector *v, int s) +{ + if (s < DEFAULT_CAPACITY || s < v->capacity) { + return 0; + } + + void *tmp = realloc(v->content, s*sizeof(int)); + if (!tmp) { + return 1; + } + + v->content = tmp; + v->capacity = s; + return 0; +} +void vector_free_placement(struct intvector *v) +{ + free(v->content); +} + 
+void vector_free(struct intvector *v) +{ + vector_free_placement(v); + free(v); +} + +int vector_get(struct intvector *v, int i) +{ + return v->content[i]; +} + +int vector_set(struct intvector *v, int i, int x) +{ + v->content[i]=x; + return 0; +} + +int vector_zero(struct intvector *v) +{ + memset(v->content, 0, sizeof(int) * v->size); + return 0; +} + +int vector_push_back(struct intvector *v, int x) +{ + if (vector_capacity(v) == vector_size(v)) { + if (vector_reserve(v, v->size*2)) { + return 1; + } + } + + vector_set(v, v->size, x); + v->size++; + return 0; +} + +int vector_size(struct intvector *v) +{ + return v->size; +} + +int vector_capacity(struct intvector *v) +{ + return v->capacity; +} + +int vector_resize(struct intvector *v, int s) +{ + if (vector_reserve(v, s)) { + return 1; + } + v->size = s; + return 0; +} diff --git a/casadm/intvector.h b/casadm/intvector.h new file mode 100644 index 000000000..9b05c5721 --- /dev/null +++ b/casadm/intvector.h @@ -0,0 +1,40 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __INTVECTOR_H +#define __INTVECTOR_H +struct intvector +{ + int capacity; + int size; + int *content; +}; +/* names of these functions (mostly) correspond to std::vector */ + +struct intvector *vector_alloc(); + +int vector_alloc_placement(struct intvector *v); + +int vector_reserve(struct intvector *v, int s); + +void vector_free(struct intvector *v); + +void vector_free_placement(struct intvector *v); + +int vector_get(struct intvector *v, int i); + +int vector_set(struct intvector *v, int i, int x); + +int vector_zero(struct intvector *v); + +int vector_push_back(struct intvector *v, int x); + +int vector_size(struct intvector *v); + +int vector_capacity(struct intvector *v); + +int vector_resize(struct intvector *v, int s); + +#endif diff --git a/casadm/ocf_env.h b/casadm/ocf_env.h new file mode 100644 index 000000000..7b724e2f4 --- /dev/null +++ b/casadm/ocf_env.h @@ -0,0 
+1,27 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef OCF_ENV_H_ +#define OCF_ENV_H_ + +#include +#include +#include "safeclib/safe_lib.h" + +#define min(x, y) ({ x < y ? x : y; }) + +#define ENV_BUG_ON(cond) ({ if (cond) exit(1); }) + +/* *** STRING OPERATIONS *** */ + +#define env_memset memset_s +#define env_memcpy memcpy_s +#define env_memcmp memcmp_s + +#define env_strnlen strnlen_s +#define env_strncmp strncmp +#define env_strncpy strncpy_s + +#endif /* OCF_ENV_H_ */ diff --git a/casadm/ocf_env_headers.h b/casadm/ocf_env_headers.h new file mode 100644 index 000000000..16d62463e --- /dev/null +++ b/casadm/ocf_env_headers.h @@ -0,0 +1,22 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __OCF_ENV_HEADERS_H__ +#define __OCF_ENV_HEADERS_H__ + +#include +#include +#include + +/* TODO: Move prefix printing to context logger. */ +#define OCF_LOGO "CAS" +#define OCF_PREFIX_SHORT "[" OCF_LOGO "] " +#define OCF_PREFIX_LONG "Cache Acceleration Software Linux" + +#define OCF_VERSION_MAIN CAS_VERSION_MAIN +#define OCF_VERSION_MAJOR CAS_VERSION_MAJOR +#define OCF_VERSION_MINOR CAS_VERSION_MINOR + +#endif /* __OCF_ENV_HEADERS_H__ */ diff --git a/casadm/psort.c b/casadm/psort.c new file mode 100644 index 000000000..6cfbd9257 --- /dev/null +++ b/casadm/psort.c @@ -0,0 +1,198 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "safeclib/safe_mem_lib.h" +#include "psort.h" + +/** + * internal state of parallel sort algorithm (entire task for main thread, + * subtask for children). 
 */
struct psort_state
{
	void *defbase;		/*!< base of master buffer sort
				 * - needed to compute offsets*/
	int spawn_threads;	/*!< how many threads this (sub)task may still spawn */
	void *base;		/*!< base of subtask */
	size_t nmemb;		/*!< number of members in subtask */
	size_t size;		/*!< of each member */
	compar_t compar;	/*!< qsort-style user comparison callback */
	int result;		/*!< partial result */
	void *tmpbuf;		/*!< temporary buffer for purpose of merge algorithm */
};

/**
 * Thin wrapper so the hot path uses one well-identified copy routine.
 */
void memcpy_replacement(void *dst, void *src, size_t size)
{
	/**
	 * Avoid this if possible. memcpy_s leads to crappy performance
	 * defeating purpose of entire optimized sort.
	 */
	memcpy_s(dst, size, src, size);
}

/**
 * merge algorithm has O(N) spatial complexity and O(N) time complexity
 *
 * Merges two adjacent sorted runs laid out back to back in @base
 * (nmemb1 elements followed by nmemb2 elements) via @tmpbuf, then
 * copies the merged result back into @base.
 * NOTE: arithmetic on void* relies on the GCC extension (sizeof(void) == 1),
 * consistent with the rest of this file.
 */
void merge_ranges(void *base, size_t nmemb1, size_t nmemb2, size_t size,
		  compar_t compar, void *tmpbuf)
{
	void *target_buf = tmpbuf;
	int i1, i2;

	for (i1 = i2 = 0; i1 < nmemb1 || i2 < nmemb2;) {
		bool lil;	/* lil means "left is less" */
		if (i1 == nmemb1) {
			/* left run exhausted - take from the right */
			lil = false;
		} else if (i2 == nmemb2) {
			/* right run exhausted - take from the left */
			lil = true;
		} else if (compar(base + i1 * size,
				  base + (nmemb1 + i2) * size) < 0) {
			lil = true;
		} else {
			lil = false;

		}
		if (lil) {
			memcpy_replacement(target_buf + (i1 + i2) * size,
					   base + i1 * size,
					   size);
			i1++;
		} else {
			memcpy_replacement(target_buf + (i1 + i2) * size,
					   base + (nmemb1 + i2) * size,
					   size);
			i2++;
		}
	}
	memcpy_replacement(base, target_buf, (nmemb1 + nmemb2) * size);
}

/**
 * Execute quicksort on part or entirety of subrange. If subranges taken into
 * account, than merge partial sort results.
+ * + * Complexity | time | spatial + * --------------------+-------------------+----------- + * Quick Sort | O(n*lg(n)/ncpu) | O(1) + * Merging | O(n) | O(N) + * --------------------+-------------------+----------- + * Entire algorithm | O(n+n*lg(n)/ncpu) | O(N) + * + * Effectively for suficiently large number of CPUs, sorting time + * becomes linear to dataset: + * \lim{ncpu \rightarrow \infty} O(n+\frac{n*lg(n)}{ncpu}) = O(n + 0^+) = O(n) + * Less can't be achieved, as last merge can't be parallelized. + */ +void *psort_thread_fun(void *arg_v) +{ + pthread_t thread; + struct psort_state* arg = arg_v; + struct psort_state base_state; + struct psort_state child_state; + memcpy_replacement(&base_state, arg, sizeof(base_state)); + if (arg->spawn_threads > 1) { + /* local state (assume, input state is unmodifiable) */ + memcpy_replacement(&child_state, arg, sizeof(base_state)); + + base_state.spawn_threads /= 2; + child_state.spawn_threads = arg->spawn_threads + - base_state.spawn_threads; + + base_state.nmemb /= 2; + child_state.nmemb = arg->nmemb - base_state.nmemb; + + child_state.base += base_state.size * + base_state.nmemb; + /* spawn child */ + if (pthread_create(&thread, 0, psort_thread_fun, &child_state)) { + /* failed to create thread */ + arg->result = -errno; + return arg_v; + } + } + + if (1 == base_state.spawn_threads) { + qsort(base_state.base, base_state.nmemb, + base_state.size, base_state.compar); + } else { + psort_thread_fun(&base_state); + if (base_state.result) { + arg->result = base_state.result; + } + } + + if (arg->spawn_threads > 1) { + if (pthread_join(thread, 0)) { + arg->result = -errno; + return arg_v; + } + if (child_state.result) { + arg->result = child_state.result; + return arg_v; + } + if (!arg->result) { + merge_ranges(arg->base, base_state.nmemb, + child_state.nmemb, arg->size, + arg->compar, + arg->tmpbuf + (base_state.base + - base_state.defbase)); + } + } + return arg_v; +} + +/** + * actual parallel sorting entry point + */ 
+int psort_main(void *base, size_t nmemb, size_t size, + compar_t compar, int ncpu) +{ + struct psort_state base_state; + /* use half the number of logical CPUs for purpose of sorting */ + base_state.spawn_threads = ncpu; + /* current num of CPUs */ + base_state.defbase = base; + base_state.base = base; + base_state.nmemb = nmemb; + base_state.size = size; + base_state.compar = compar; + base_state.tmpbuf = malloc(size * nmemb); + base_state.result = 0; + if (!base_state.tmpbuf) { + return -1; + } + psort_thread_fun(&base_state); + free(base_state.tmpbuf); + return base_state.result; +} + +void psort(void *base, size_t nmemb, size_t size, + compar_t compar) +{ + /* entry point to psort */ + int ncpu = sysconf(_SC_NPROCESSORS_ONLN)/2; + int maxncpu = nmemb / 1024; + if (maxncpu < ncpu) { + ncpu = maxncpu; + } + /* don't invoke actual psort when less than 2 threads are needed */ + if (ncpu < 2) { + qsort(base, nmemb, size, compar); + } else { + if (psort_main(base, nmemb, size, compar, ncpu)) { + /* if parallel sorting failed (i.e. due to failed thread + * creation, fall back to single threaded operation */ + qsort(base, nmemb, size, compar); + } + } +} diff --git a/casadm/psort.h b/casadm/psort.h new file mode 100644 index 000000000..36c961121 --- /dev/null +++ b/casadm/psort.h @@ -0,0 +1,22 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef PSORT_H +#define PSORT_H +typedef int (*compar_t)(const void *, const void *); + +/** + * function does exactly the same thing as qsort, except, that it sorts + * using many CPU cores, not just one. + * + * number of CPU cores is configured as half of the number of online + * CPUs in the system. 
+ */ +void psort(void *base, size_t nmemb, size_t size, + compar_t compar); + + +#endif + diff --git a/casadm/safeclib/ignore_handler_s.c b/casadm/safeclib/ignore_handler_s.c new file mode 100644 index 000000000..54b65c9a2 --- /dev/null +++ b/casadm/safeclib/ignore_handler_s.c @@ -0,0 +1,72 @@ +/*------------------------------------------------------------------ + * ignore_handler_s.c + * + * 2012, Jonathan Toppins + * + * Copyright (c) 2012 Cisco Systems + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + *------------------------------------------------------------------ + */ + +#include "safeclib_private.h" + +/** + * NAME + * ignore_handler_s + * + * SYNOPSIS + * #include "safe_lib.h" + * void ignore_handler_s(const char *msg, void *ptr, errno_t error) + * + * DESCRIPTION + * This function simply returns to the caller. 
+ * + * SPECIFIED IN + * ISO/IEC JTC1 SC22 WG14 N1172, Programming languages, environments + * and system software interfaces, Extensions to the C Library, + * Part I: Bounds-checking interfaces + * + * INPUT PARAMETERS + * msg Pointer to the message describing the error + * + * ptr Pointer to aassociated data. Can be NULL. + * + * error The error code encountered. + * + * RETURN VALUE + * Returns no value. + * + * ALSO SEE + * abort_handler_s() + * + */ + +void ignore_handler_s(const char *msg, void *ptr, errno_t error) +{ + + sldebug_printf("IGNORE CONSTRAINT HANDLER: (%u) %s\n", error, + (msg) ? msg : "Null message"); + return; +} +EXPORT_SYMBOL(ignore_handler_s); diff --git a/casadm/safeclib/mem_primitives_lib.c b/casadm/safeclib/mem_primitives_lib.c new file mode 100644 index 000000000..cc189e5ea --- /dev/null +++ b/casadm/safeclib/mem_primitives_lib.c @@ -0,0 +1,853 @@ +/*------------------------------------------------------------------ + * mem_primitives_lib.c - Unguarded Memory Copy Routines + * + * February 2005, Bo Berry + * + * Copyright (c) 2005-2009 Cisco Systems + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + *------------------------------------------------------------------ + */ + +#include "mem_primitives_lib.h" + +/* + * mem_primitives_lib.c provides unguarded memory routines + * that are used by the safe_mem_library. These routines + * may also be used by an application, but the application + * is responsible for all parameter validation and alignment. + */ + +/** + * NAME + * mem_prim_set - Sets memory to value + * + * SYNOPSIS + * #include "mem_primitives_lib.h" + * void + * mem_prim_set(void *dest, uint32_t len, uint8_t value) + * + * DESCRIPTION + * Sets len bytes starting at dest to the specified value + * + * INPUT PARAMETERS + * dest - pointer to memory that will be set to value + * + * len - number of bytes to be set + * + * value - byte value + * + * OUTPUT PARAMETERS + * dest - is updated + * + * RETURN VALUE + * none + * + */ +void +mem_prim_set (void *dest, uint32_t len, uint8_t value) +{ + uint8_t *dp; + uint32_t count; + uint32_t lcount; + + uint32_t *lp; + uint32_t value32; + + count = len; + + dp = dest; + + value32 = value | (value << 8) | (value << 16) | (value << 24); + + /* + * First, do the few bytes to get uint32_t aligned. + */ + for (; count && ( (uintptr_t)dp & (sizeof(uint32_t)-1) ); count--) { + *dp++ = value; + } + + /* + * Then do the uint32_ts, unrolled the loop for performance + */ + lp = (uint32_t *)dp; + lcount = count >> 2; + + while (lcount != 0) { + + switch (lcount) { + /* + * Here we do blocks of 8. Once the remaining count + * drops below 8, take the fast track to finish up. 
+ */ + default: + *lp++ = value32; *lp++ = value32; *lp++ = value32; *lp++ = value32; + *lp++ = value32; *lp++ = value32; *lp++ = value32; *lp++ = value32; + *lp++ = value32; *lp++ = value32; *lp++ = value32; *lp++ = value32; + *lp++ = value32; *lp++ = value32; *lp++ = value32; *lp++ = value32; + lcount -= 16; + break; + + case 15: *lp++ = value32; + case 14: *lp++ = value32; + case 13: *lp++ = value32; + case 12: *lp++ = value32; + case 11: *lp++ = value32; + case 10: *lp++ = value32; + case 9: *lp++ = value32; + case 8: *lp++ = value32; + + case 7: *lp++ = value32; + case 6: *lp++ = value32; + case 5: *lp++ = value32; + case 4: *lp++ = value32; + case 3: *lp++ = value32; + case 2: *lp++ = value32; + case 1: *lp++ = value32; + lcount = 0; + break; + } + } /* end while */ + + + dp = (uint8_t *)lp; + + /* + * compute the number of remaining bytes + */ + count &= (sizeof(uint32_t)-1); + + /* + * remaining bytes + */ + for (; count; dp++, count--) { + *dp = value; + } + + return; +} + + +/** + * NAME + * mem_prim_set16 - Sets memory to value + * + * SYNOPSIS + * #include "mem_primitives_lib.h" + * void + * mem_prim_set16(uint16_t *dp, uint32_t len, uint16_t value) + * + * DESCRIPTION + * Sets len uint16_ts starting at dest to the specified value. + * Pointers must meet system alignment requirements. + * + * INPUT PARAMETERS + * dest - pointer to memory that will be set to value + * + * len - number of uint16_ts to be set + * + * value - uint16_t value + * + * OUTPUT PARAMETERS + * dest - is updated + * + * RETURN VALUE + * none + * + */ +void +mem_prim_set16 (uint16_t *dp, uint32_t len, uint16_t value) +{ + + while (len != 0) { + + switch (len) { + /* + * Here we do blocks of 8. Once the remaining count + * drops below 8, take the fast track to finish up. 
+ */ + default: + *dp++ = value; *dp++ = value; *dp++ = value; *dp++ = value; + *dp++ = value; *dp++ = value; *dp++ = value; *dp++ = value; + *dp++ = value; *dp++ = value; *dp++ = value; *dp++ = value; + *dp++ = value; *dp++ = value; *dp++ = value; *dp++ = value; + len -= 16; + break; + + case 15: *dp++ = value; + case 14: *dp++ = value; + case 13: *dp++ = value; + case 12: *dp++ = value; + case 11: *dp++ = value; + case 10: *dp++ = value; + case 9: *dp++ = value; + case 8: *dp++ = value; + + case 7: *dp++ = value; + case 6: *dp++ = value; + case 5: *dp++ = value; + case 4: *dp++ = value; + case 3: *dp++ = value; + case 2: *dp++ = value; + case 1: *dp++ = value; + len = 0; + break; + } + } /* end while */ + + return; +} + + +/** + * NAME + * mem_prim_set32 - Sets memory to the uint32_t value + * + * SYNOPSIS + * #include "mem_primitives_lib.h" + * void + * mem_prim_set32(uint32_t *dp, uint32_t len, uint32_t value) + * + * DESCRIPTION + * Sets len uint32_ts starting at dest to the specified value + * Pointers must meet system alignment requirements. + * + * INPUT PARAMETERS + * dest - pointer to memory that will be set to value + * + * len - number of uint32_ts to be set + * + * value - uint32_t value + * + * OUTPUT PARAMETERS + * dest - is updated + * + * RETURN VALUE + * none + * + */ +void +mem_prim_set32 (uint32_t *dp, uint32_t len, uint32_t value) +{ + + while (len != 0) { + + switch (len) { + /* + * Here we do blocks of 8. Once the remaining count + * drops below 8, take the fast track to finish up. 
+ */ + default: + *dp++ = value; *dp++ = value; *dp++ = value; *dp++ = value; + *dp++ = value; *dp++ = value; *dp++ = value; *dp++ = value; + *dp++ = value; *dp++ = value; *dp++ = value; *dp++ = value; + *dp++ = value; *dp++ = value; *dp++ = value; *dp++ = value; + len -= 16; + break; + + case 15: *dp++ = value; + case 14: *dp++ = value; + case 13: *dp++ = value; + case 12: *dp++ = value; + case 11: *dp++ = value; + case 10: *dp++ = value; + case 9: *dp++ = value; + case 8: *dp++ = value; + + case 7: *dp++ = value; + case 6: *dp++ = value; + case 5: *dp++ = value; + case 4: *dp++ = value; + case 3: *dp++ = value; + case 2: *dp++ = value; + case 1: *dp++ = value; + len = 0; + break; + } + } /* end while */ + + return; +} + + +/** + * NAME + * mem_prim_move - Move (handles overlap) memory + * + * SYNOPSIS + * #include "mem_primitives_lib.h" + * void + * mem_prim_move(void *dest, const void *src, uint32_t len) + * + * DESCRIPTION + * Moves at most slen bytes from src to dest, up to dmax + * bytes. Dest may overlap with src. + * + * INPUT PARAMETERS + * dest - pointer to the memory that will be replaced by src. + * + * src - pointer to the memory that will be copied + * to dest + * + * len - maximum number bytes of src that can be copied + * + * OUTPUT PARAMETERS + * dest - is updated + * + * RETURN VALUE + * none + * + */ +void +mem_prim_move (void *dest, const void *src, uint32_t len) +{ + +#define wsize sizeof(uint32_t) +#define wmask (wsize - 1) + + uint8_t *dp = dest; + const uint8_t *sp = src; + + uint32_t tsp; + + /* + * Determine if we need to copy forward or backward (overlap) + */ + if ((uintptr_t)dp < (uintptr_t)sp) { + /* + * Copy forward. + */ + + /* + * get a working copy of src for bit operations + */ + tsp = (uintptr_t)sp; + + /* + * Try to align both operands. This cannot be done + * unless the low bits match. 
+ */ + if ((tsp | (uintptr_t)dp) & wmask) { + /* + * determine how many bytes to copy to align operands + */ + if ((tsp ^ (uintptr_t)dp) & wmask || len < wsize) { + tsp = len; + + } else { + tsp = wsize - (tsp & wmask); + } + + len -= tsp; + + /* + * make the alignment + */ + do { + *dp++ = *sp++; + } while (--tsp); + } + + /* + * Now copy, then mop up any trailing bytes. + */ + tsp = len / wsize; + + if (tsp > 0) { + + do { + *(uint32_t *)dp = *(uint32_t *)sp; + + sp += wsize; + dp += wsize; + } while (--tsp); + } + + /* + * copy over the remaining bytes and we're done + */ + tsp = len & wmask; + + if (tsp > 0) { + do { + *dp++ = *sp++; + } while (--tsp); + } + + } else { + /* + * This section is used to copy backwards, to handle any + * overlap. The alignment requires (tps&wmask) bytes to + * align. + */ + + /* + * go to end of the memory to copy + */ + sp += len; + dp += len; + + /* + * get a working copy of src for bit operations + */ + tsp = (uintptr_t)sp; + + /* + * Try to align both operands. + */ + if ((tsp | (uintptr_t)dp) & wmask) { + + if ((tsp ^ (uintptr_t)dp) & wmask || len <= wsize) { + tsp = len; + } else { + tsp &= wmask; + } + + len -= tsp; + + /* + * make the alignment + */ + do { + *--dp = *--sp; + } while (--tsp); + } + + /* + * Now copy in uint32_t units, then mop up any trailing bytes. + */ + tsp = len / wsize; + + if (tsp > 0) { + do { + sp -= wsize; + dp -= wsize; + + *(uint32_t *)dp = *(uint32_t *)sp; + } while (--tsp); + } + + /* + * copy over the remaining bytes and we're done + */ + tsp = len & wmask; + if (tsp > 0) { + tsp = len & wmask; + do { + *--dp = *--sp; + } while (--tsp); + } + } + + return; +} + + +/** + * NAME + * mem_prim_move8 - Move (handles overlap) memory + * + * SYNOPSIS + * #include "mem_primitives_lib.h" + * void + * mem_prim_move8(void *dest, const void *src, uint32_t len) + * + * DESCRIPTION + * Moves at most len uint8_ts from sp to dp. + * The destination may overlap with source. 
+ * + * INPUT PARAMETERS + * dp - pointer to the memory that will be replaced by sp. + * + * sp - pointer to the memory that will be copied + * to dp + * + * len - maximum number uint8_t of sp that can be copied + * + * OUTPUT PARAMETERS + * dp - pointer to the memory that will be replaced by sp. + * + * RETURN VALUE + * none + * + */ +void +mem_prim_move8 (uint8_t *dp, const uint8_t *sp, uint32_t len) +{ + + /* + * Determine if we need to copy forward or backward (overlap) + */ + if (dp < sp) { + /* + * Copy forward. + */ + + while (len != 0) { + + switch (len) { + /* + * Here we do blocks of 8. Once the remaining count + * drops below 8, take the fast track to finish up. + */ + default: + *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; + *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; + *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; + *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; + len -= 16; + break; + + case 15: *dp++ = *sp++; + case 14: *dp++ = *sp++; + case 13: *dp++ = *sp++; + case 12: *dp++ = *sp++; + case 11: *dp++ = *sp++; + case 10: *dp++ = *sp++; + case 9: *dp++ = *sp++; + case 8: *dp++ = *sp++; + + case 7: *dp++ = *sp++; + case 6: *dp++ = *sp++; + case 5: *dp++ = *sp++; + case 4: *dp++ = *sp++; + case 3: *dp++ = *sp++; + case 2: *dp++ = *sp++; + case 1: *dp++ = *sp++; + len = 0; + break; + } + } /* end while */ + + } else { + /* + * This section is used to copy backwards, to handle any + * overlap. The alignment requires (tps&wmask) bytes to + * align. + */ + + + /* + * go to end of the memory to copy + */ + sp += len; + dp += len; + + while (len != 0) { + + switch (len) { + /* + * Here we do blocks of 8. Once the remaining count + * drops below 8, take the fast track to finish up. 
+ */ + default: + *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; + *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; + *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; + *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; + len -= 16; + break; + + case 15: *--dp = *--sp; + case 14: *--dp = *--sp; + case 13: *--dp = *--sp; + case 12: *--dp = *--sp; + case 11: *--dp = *--sp; + case 10: *--dp = *--sp; + case 9: *--dp = *--sp; + case 8: *--dp = *--sp; + + case 7: *--dp = *--sp; + case 6: *--dp = *--sp; + case 5: *--dp = *--sp; + case 4: *--dp = *--sp; + case 3: *--dp = *--sp; + case 2: *--dp = *--sp; + case 1: *--dp = *--sp; + len = 0; + break; + } + } /* end while */ + } + + return; +} + + +/** + * NAME + * mem_prim_move16 - Move (handles overlap) memory + * + * SYNOPSIS + * #include "mem_primitives_lib.h" + * void + * mem_prim_move16(void *dest, const void *src, uint32_t len) + * + * DESCRIPTION + * Moves at most len uint16_ts from sp to dp. + * The destination may overlap with source. + * + * INPUT PARAMETERS + * dp - pointer to the memory that will be replaced by sp. + * + * sp - pointer to the memory that will be copied + * to dp + * + * len - maximum number uint16_t of sp that can be copied + * + * OUTPUT PARAMETERS + * dp - is updated + * + * RETURN VALUE + * none + * + */ +void +mem_prim_move16 (uint16_t *dp, const uint16_t *sp, uint32_t len) +{ + + /* + * Determine if we need to copy forward or backward (overlap) + */ + if (dp < sp) { + /* + * Copy forward. + */ + + while (len != 0) { + + switch (len) { + /* + * Here we do blocks of 8. Once the remaining count + * drops below 8, take the fast track to finish up. 
+ */ + default: + *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; + *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; + *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; + *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; + len -= 16; + break; + + case 15: *dp++ = *sp++; + case 14: *dp++ = *sp++; + case 13: *dp++ = *sp++; + case 12: *dp++ = *sp++; + case 11: *dp++ = *sp++; + case 10: *dp++ = *sp++; + case 9: *dp++ = *sp++; + case 8: *dp++ = *sp++; + + case 7: *dp++ = *sp++; + case 6: *dp++ = *sp++; + case 5: *dp++ = *sp++; + case 4: *dp++ = *sp++; + case 3: *dp++ = *sp++; + case 2: *dp++ = *sp++; + case 1: *dp++ = *sp++; + len = 0; + break; + } + } /* end while */ + + } else { + /* + * This section is used to copy backwards, to handle any + * overlap. The alignment requires (tps&wmask) bytes to + * align. + */ + + /* + * go to end of the memory to copy + */ + sp += len; + dp += len; + + while (len != 0) { + + switch (len) { + /* + * Here we do blocks of 8. Once the remaining count + * drops below 8, take the fast track to finish up. 
+ */ + default: + *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; + *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; + *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; + *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; + len -= 16; + break; + + case 15: *--dp = *--sp; + case 14: *--dp = *--sp; + case 13: *--dp = *--sp; + case 12: *--dp = *--sp; + case 11: *--dp = *--sp; + case 10: *--dp = *--sp; + case 9: *--dp = *--sp; + case 8: *--dp = *--sp; + + case 7: *--dp = *--sp; + case 6: *--dp = *--sp; + case 5: *--dp = *--sp; + case 4: *--dp = *--sp; + case 3: *--dp = *--sp; + case 2: *--dp = *--sp; + case 1: *--dp = *--sp; + len = 0; + break; + } + } /* end while */ + } + + return; +} + + +/** + * NAME + * mem_prim_move32 - Move (handles overlap) memory + * + * SYNOPSIS + * #include "mem_primitives_lib.h" + * void + * mem_prim_move32(void *dest, const void *src, uint32_t len) + * + * DESCRIPTION + * Moves at most len uint32_ts from sp to dp. + * The destination may overlap with source. + * + * INPUT PARAMETERS + * dp - pointer to the memory that will be replaced by sp. + * + * sp - pointer to the memory that will be copied + * to dp + * + * len - maximum number uint32_t of sp that can be copied + * + * OUTPUT PARAMETERS + * dp - is updated + * + * RETURN VALUE + * none + * + */ +void +mem_prim_move32 (uint32_t *dp, const uint32_t *sp, uint32_t len) +{ + + /* + * Determine if we need to copy forward or backward (overlap) + */ + if (dp < sp) { + /* + * Copy forward. + */ + + while (len != 0) { + + switch (len) { + /* + * Here we do blocks of 8. Once the remaining count + * drops below 8, take the fast track to finish up. 
+ */ + default: + *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; + *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; + *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; + *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; *dp++ = *sp++; + len -= 16; + break; + + case 15: *dp++ = *sp++; + case 14: *dp++ = *sp++; + case 13: *dp++ = *sp++; + case 12: *dp++ = *sp++; + case 11: *dp++ = *sp++; + case 10: *dp++ = *sp++; + case 9: *dp++ = *sp++; + case 8: *dp++ = *sp++; + + case 7: *dp++ = *sp++; + case 6: *dp++ = *sp++; + case 5: *dp++ = *sp++; + case 4: *dp++ = *sp++; + case 3: *dp++ = *sp++; + case 2: *dp++ = *sp++; + case 1: *dp++ = *sp++; + len = 0; + break; + } + } /* end while */ + + } else { + /* + * This section is used to copy backwards, to handle any + * overlap. + */ + + /* + * go to end of the memory to copy + */ + sp += len; + dp += len; + + while (len != 0) { + + switch (len) { + /* + * Here we do blocks of 8. Once the remaining count + * drops below 8, take the fast track to finish up. 
+ */ + default: + *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; + *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; + *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; + *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; *--dp = *--sp; + len -= 16; + break; + + case 15: *--dp = *--sp; + case 14: *--dp = *--sp; + case 13: *--dp = *--sp; + case 12: *--dp = *--sp; + case 11: *--dp = *--sp; + case 10: *--dp = *--sp; + case 9: *--dp = *--sp; + case 8: *--dp = *--sp; + + case 7: *--dp = *--sp; + case 6: *--dp = *--sp; + case 5: *--dp = *--sp; + case 4: *--dp = *--sp; + case 3: *--dp = *--sp; + case 2: *--dp = *--sp; + case 1: *--dp = *--sp; + len = 0; + break; + } + } /* end while */ + } + + return; +} diff --git a/casadm/safeclib/mem_primitives_lib.h b/casadm/safeclib/mem_primitives_lib.h new file mode 100644 index 000000000..26c83d858 --- /dev/null +++ b/casadm/safeclib/mem_primitives_lib.h @@ -0,0 +1,74 @@ +/*------------------------------------------------------------------ + * mem_primitives_lib.h - Unguarded Memory Copy Routines + * + * October 2008, Bo Berry + * + * Copyright (c) 2008-2011 by Cisco Systems, Inc + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + *------------------------------------------------------------------ + */ + +#ifndef __MEM_PRIMITIVES_LIB_H__ +#define __MEM_PRIMITIVES_LIB_H__ + +#include "safeclib_private.h" + +/* + * These are prototypes for _unguarded_ memory routines. The caller must + * validate all parameters prior to invocation. Useful for diagnostics + * and system initialization processing. + */ + +/* moves (handles overlap) memory */ +extern void +mem_prim_move(void *dest, const void *src, uint32_t length); + + +/* uint8_t moves (handles overlap) memory */ +extern void +mem_prim_move8(uint8_t *dest, const uint8_t *src, uint32_t length); + +/* uint16_t moves (handles overlap) memory */ +extern void +mem_prim_move16(uint16_t *dest, const uint16_t *src, uint32_t length); + +/* uint32_t moves (handles overlap) memory */ +extern void +mem_prim_move32(uint32_t *dest, const uint32_t *src, uint32_t length); + + +/* set bytes */ +extern void +mem_prim_set(void *dest, uint32_t dmax, uint8_t value); + +/* set uint16_ts */ +extern void +mem_prim_set16(uint16_t *dest, uint32_t dmax, uint16_t value); + +/* set uint32_ts */ +extern void +mem_prim_set32(uint32_t *dest, uint32_t dmax, uint32_t value); + + +#endif /* __MEM_PRIMITIVES_LIB_H__ */ diff --git a/casadm/safeclib/memcpy_s.c b/casadm/safeclib/memcpy_s.c new file mode 100644 index 000000000..a0d1f2e43 --- /dev/null +++ b/casadm/safeclib/memcpy_s.c @@ -0,0 +1,157 @@ +/*------------------------------------------------------------------ + * memcpy_s + * + * 
October 2008, Bo Berry + * + * Copyright (c) 2008-2011 Cisco Systems + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + *------------------------------------------------------------------ + */ + +#include "safeclib_private.h" +#include "safe_mem_constraint.h" +#include "mem_primitives_lib.h" +#include "safe_mem_lib.h" + + +/** + * NAME + * memcpy_s + * + * SYNOPSIS + * #include "safe_mem_lib.h" + * errno_t + * memcpy_s(void *dest, rsize_t dmax, const void *src, rsize_t smax) + * + * DESCRIPTION + * This function copies at most smax bytes from src to dest, up to + * dmax. + * + * SPECIFIED IN + * ISO/IEC JTC1 SC22 WG14 N1172, Programming languages, environments + * and system software interfaces, Extensions to the C Library, + * Part I: Bounds-checking interfaces + * + * INPUT PARAMETERS + * dest pointer to memory that will be replaced by src. 
+ * + * dmax maximum length of the resulting dest + * + * src pointer to the memory that will be copied to dest + * + * smax maximum number bytes of src to copy + * + * OUTPUT PARAMETERS + * dest is updated + * + * RUNTIME CONSTRAINTS + * Neither dest nor src shall be a null pointer. + * Neither dmax nor smax shall be zero. + * dmax shall not be greater than RSIZE_MAX_MEM. + * smax shall not be greater than dmax. + * Copying shall not take place between regions that overlap. + * If there is a runtime-constraint violation, the memcpy_s function + * stores zeros in the first dmax bytes of the region pointed to + * by dest if dest is not a null pointer and smax is valid. + * + * RETURN VALUE + * EOK successful operation + * ESNULLP NULL pointer + * ESZEROL zero length + * ESLEMAX length exceeds max limit + * ESOVRLP source memory overlaps destination + * + * ALSO SEE + * memcpy16_s(), memcpy32_s(), memmove_s(), memmove16_s(), + * memmove32_s() + * + */ +errno_t +memcpy_s (void *dest, rsize_t dmax, const void *src, rsize_t smax) +{ + uint8_t *dp; + const uint8_t *sp; + + dp = dest; + sp = src; + + if (dp == NULL) { + invoke_safe_mem_constraint_handler("memcpy_s: dest is NULL", + NULL, ESNULLP); + return RCNEGATE(ESNULLP); + } + + if (dmax == 0) { + invoke_safe_mem_constraint_handler("memcpy_s: dmax is 0", + NULL, ESZEROL); + return RCNEGATE(ESZEROL); + } + + if (dmax > RSIZE_MAX_MEM) { + invoke_safe_mem_constraint_handler("memcpy_s: dmax exceeds max", + NULL, ESLEMAX); + return RCNEGATE(ESLEMAX); + } + + if (smax == 0) { + mem_prim_set(dp, dmax, 0); + invoke_safe_mem_constraint_handler("memcpy_s: smax is 0", + NULL, ESZEROL); + return RCNEGATE(ESZEROL); + } + + if (smax > dmax) { + mem_prim_set(dp, dmax, 0); + invoke_safe_mem_constraint_handler("memcpy_s: smax exceeds dmax", + NULL, ESLEMAX); + return RCNEGATE(ESLEMAX); + } + + if (sp == NULL) { + mem_prim_set(dp, dmax, 0); + invoke_safe_mem_constraint_handler("memcpy_s: src is NULL", + NULL, ESNULLP); + return 
RCNEGATE(ESNULLP); + } + + + /* + * overlap is undefined behavior, do not allow + */ + if( ((dp > sp) && (dp < (sp+smax))) || + ((sp > dp) && (sp < (dp+dmax))) ) { + mem_prim_set(dp, dmax, 0); + invoke_safe_mem_constraint_handler("memcpy_s: overlap undefined", + NULL, ESOVRLP); + return RCNEGATE(ESOVRLP); + } + + /* + * now perform the copy + */ + mem_prim_move(dp, sp, smax); + + return RCNEGATE(EOK); +} +EXPORT_SYMBOL(memcpy_s); diff --git a/casadm/safeclib/memmove_s.c b/casadm/safeclib/memmove_s.c new file mode 100644 index 000000000..d71cc8899 --- /dev/null +++ b/casadm/safeclib/memmove_s.c @@ -0,0 +1,148 @@ +/*------------------------------------------------------------------ + * memmove_s.c + * + * October 2008, Bo Berry + * + * Copyright (c) 2008-2011 Cisco Systems + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *------------------------------------------------------------------ + */ + +#include "safeclib_private.h" +#include "safe_mem_constraint.h" +#include "mem_primitives_lib.h" +#include "safe_mem_lib.h" + +/** + * NAME + * memmove_s + * + * SYNOPSIS + * #include "safe_mem_lib.h" + * errno_t + * memmove_s(void *dest, rsize_t dmax, + * const void *src, rsize_t smax) + * + * DESCRIPTION + * The memmove_s function copies smax bytes from the region pointed + * to by src into the region pointed to by dest. This copying takes place + * as if the smax bytes from the region pointed to by src are first copied + * into a temporary array of smax bytes that does not overlap the region + * pointed to by dest or src, and then the smax bytes from the temporary + * array are copied into the object region to by dest. + * + * SPECIFIED IN + * ISO/IEC TR 24731, Programming languages, environments + * and system software interfaces, Extensions to the C Library, + * Part I: Bounds-checking interfaces + * + * INPUT PARAMETERS + * dest pointer to the memory that will be replaced by src. + * + * dmax maximum length of the resulting dest, in bytes + * + * src pointer to the memory that will be copied + * to dest + * + * smax maximum number bytes of src that can be copied + * + * OUTPUT PARAMETERS + * dest is updated + * + * RUNTIME CONSTRAINTS + * Neither dest nor src shall be a null pointer. + * Neither dmax nor smax shall be 0. + * dmax shall not be greater than RSIZE_MAX_MEM. + * smax shall not be greater than dmax. + * If there is a runtime-constraint violation, the memmove_s function + * stores zeros in the first dmax characters of the regionpointed to + * by dest if dest is not a null pointer and dmax is not greater + * than RSIZE_MAX_MEM. 
+ * + * RETURN VALUE + * EOK successful operation + * ESNULLP NULL pointer + * ESZEROL zero length + * ESLEMAX length exceeds max limit + * + * ALSO SEE + * memmove16_s(), memmove32_s(), memcpy_s(), memcpy16_s() memcpy32_s() + * + */ +errno_t +memmove_s (void *dest, rsize_t dmax, const void *src, rsize_t smax) +{ + uint8_t *dp; + const uint8_t *sp; + + dp= dest; + sp = src; + + if (dp == NULL) { + invoke_safe_mem_constraint_handler("memmove_s: dest is null", + NULL, ESNULLP); + return (RCNEGATE(ESNULLP)); + } + + if (dmax == 0) { + invoke_safe_mem_constraint_handler("memmove_s: dmax is 0", + NULL, ESZEROL); + return (RCNEGATE(ESZEROL)); + } + + if (dmax > RSIZE_MAX_MEM) { + invoke_safe_mem_constraint_handler("memmove_s: dmax exceeds max", + NULL, ESLEMAX); + return (RCNEGATE(ESLEMAX)); + } + + if (smax == 0) { + mem_prim_set(dp, dmax, 0); + invoke_safe_mem_constraint_handler("memmove_s: smax is 0", + NULL, ESZEROL); + return (RCNEGATE(ESZEROL)); + } + + if (smax > dmax) { + mem_prim_set(dp, dmax, 0); + invoke_safe_mem_constraint_handler("memmove_s: smax exceeds max", + NULL, ESLEMAX); + return (RCNEGATE(ESLEMAX)); + } + + if (sp == NULL) { + mem_prim_set(dp, dmax, 0); + invoke_safe_mem_constraint_handler("memmove_s: src is null", + NULL, ESNULLP); + return (RCNEGATE(ESNULLP)); + } + + /* + * now perform the copy + */ + mem_prim_move(dp, sp, smax); + + return (RCNEGATE(EOK)); +} +EXPORT_SYMBOL(memmove_s); diff --git a/casadm/safeclib/memset_s.c b/casadm/safeclib/memset_s.c new file mode 100644 index 000000000..778239115 --- /dev/null +++ b/casadm/safeclib/memset_s.c @@ -0,0 +1,105 @@ +/*------------------------------------------------------------------ + * memset_s + * + * October 2008, Bo Berry + * + * Copyright (c) 2008-2011 Cisco Systems + * All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + *------------------------------------------------------------------ + */ + +#include "safeclib_private.h" +#include "safe_mem_constraint.h" +#include "mem_primitives_lib.h" +#include "safe_mem_lib.h" + + +/** + * NAME + * memset_s + * + * SYNOPSIS + * #include "safe_mem_lib.h" + * errno_t + * memset_s(void *dest, rsize_t len, uint8_t value) + * + * DESCRIPTION + * Sets len bytes starting at dest to the specified value. + * + * SPECIFIED IN + * ISO/IEC JTC1 SC22 WG14 N1172, Programming languages, environments + * and system software interfaces, Extensions to the C Library, + * Part I: Bounds-checking interfaces + * + * INPUT PARAMETERS + * dest pointer to memory that will be set to the value + * + * len number of bytes to be set + * + * value byte value + * + * OUTPUT PARAMETERS + * dest is updated + * + * RUNTIME CONSTRAINTS + * dest shall not be a null pointer. 
+ * len shall not be 0 nor greater than RSIZE_MAX_MEM. + * If there is a runtime constraint, the operation is not performed. + * + * RETURN VALUE + * EOK successful operation + * ESNULLP NULL pointer + * ESZEROL zero length + * ESLEMAX length exceeds max limit + * + * ALSO SEE + * memset16_s(), memset32_s() + * + */ +errno_t +memset_s (void *dest, rsize_t len, uint8_t value) +{ + if (dest == NULL) { + invoke_safe_mem_constraint_handler("memset_s: dest is null", + NULL, ESNULLP); + return (RCNEGATE(ESNULLP)); + } + + if (len == 0) { + invoke_safe_mem_constraint_handler("memset_s: len is 0", + NULL, ESZEROL); + return (RCNEGATE(ESZEROL)); + } + + if (len > RSIZE_MAX_MEM) { + invoke_safe_mem_constraint_handler("memset_s: len exceeds max", + NULL, ESLEMAX); + return (RCNEGATE(ESLEMAX)); + } + + mem_prim_set(dest, len, value); + + return (RCNEGATE(EOK)); +} +EXPORT_SYMBOL(memset_s); diff --git a/casadm/safeclib/safe_lib.h b/casadm/safeclib/safe_lib.h new file mode 100644 index 000000000..8e02899af --- /dev/null +++ b/casadm/safeclib/safe_lib.h @@ -0,0 +1,61 @@ +/*------------------------------------------------------------------ + * safe_lib.h -- Safe C Library + * + * October 2008, Bo Berry + * Modified 2012, Jonathan Toppins + * + * Copyright (c) 2008-2013 by Cisco Systems, Inc + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + *------------------------------------------------------------------ + */ + +#ifndef __SAFE_LIB_H__ +#define __SAFE_LIB_H__ + +#include "safe_types.h" +#include "safe_lib_errno.h" + +/* C11 appendix K types - specific for bounds checking */ +typedef size_t rsize_t; + +/* + * We depart from the standard and allow memory and string operations to + * have different max sizes. See the respective safe_mem_lib.h or + * safe_str_lib.h files. + */ +/* #define RSIZE_MAX (~(rsize_t)0) - leave here for completeness */ + +typedef void (*constraint_handler_t) (const char * /* msg */, + void * /* ptr */, + errno_t /* error */); + +extern void abort_handler_s(const char *msg, void *ptr, errno_t error); +extern void ignore_handler_s(const char *msg, void *ptr, errno_t error); + +#define sl_default_handler ignore_handler_s + +#include "safe_mem_lib.h" +#include "safe_str_lib.h" + +#endif /* __SAFE_LIB_H__ */ diff --git a/casadm/safeclib/safe_lib_errno.h b/casadm/safeclib/safe_lib_errno.h new file mode 100644 index 000000000..a27e0f281 --- /dev/null +++ b/casadm/safeclib/safe_lib_errno.h @@ -0,0 +1,91 @@ +/*------------------------------------------------------------------ + * safe_lib_errno.h -- Safe C Lib Error codes + * + * October 2008, Bo Berry + * Modified 2012, Jonathan Toppins + * + * Copyright (c) 2008-2013 by Cisco Systems, Inc + * All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + *------------------------------------------------------------------ + */ + +#ifndef __SAFE_LIB_ERRNO_H__ +#define __SAFE_LIB_ERRNO_H__ + +#ifdef __KERNEL__ +# include +#else +#include +#endif /* __KERNEL__ */ + +/* + * Safe Lib specific errno codes. These can be added to the errno.h file + * if desired. 
+ */ +#ifndef ESNULLP +#define ESNULLP ( 400 ) /* null ptr */ +#endif + +#ifndef ESZEROL +#define ESZEROL ( 401 ) /* length is zero */ +#endif + +#ifndef ESLEMIN +#define ESLEMIN ( 402 ) /* length is below min */ +#endif + +#ifndef ESLEMAX +#define ESLEMAX ( 403 ) /* length exceeds max */ +#endif + +#ifndef ESOVRLP +#define ESOVRLP ( 404 ) /* overlap undefined */ +#endif + +#ifndef ESEMPTY +#define ESEMPTY ( 405 ) /* empty string */ +#endif + +#ifndef ESNOSPC +#define ESNOSPC ( 406 ) /* not enough space for s2 */ +#endif + +#ifndef ESUNTERM +#define ESUNTERM ( 407 ) /* unterminated string */ +#endif + +#ifndef ESNODIFF +#define ESNODIFF ( 408 ) /* no difference */ +#endif + +#ifndef ESNOTFND +#define ESNOTFND ( 409 ) /* not found */ +#endif + +/* EOK may or may not be defined in errno.h */ +#ifndef EOK +#define EOK ( 0 ) +#endif + +#endif /* __SAFE_LIB_ERRNO_H__ */ diff --git a/casadm/safeclib/safe_mem_constraint.c b/casadm/safeclib/safe_mem_constraint.c new file mode 100644 index 000000000..00e82a2f8 --- /dev/null +++ b/casadm/safeclib/safe_mem_constraint.c @@ -0,0 +1,142 @@ +/*------------------------------------------------------------------ + * safe_mem_constraint.c + * + * October 2008, Bo Berry + * 2012, Jonathan Toppins + * + * Copyright (c) 2008-2012 Cisco Systems + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + *------------------------------------------------------------------ + */ + +#include "safeclib_private.h" +#include "safe_mem_constraint.h" +#include "safe_mem_lib.h" + + +static constraint_handler_t mem_handler = NULL; + + +/** + * NAME + * set_mem_constraint_handler_s + * + * SYNOPSIS + * #include "safe_mem_lib.h" + * constraint_handler_t + * set_mem_constraint_handler_straint_handler_t handler) + * + * DESCRIPTION + * The set_mem_constraint_handler_s function sets the runtime-constraint + * handler to be handler. The runtime-constraint handler is the function to + * be called when a library function detects a runtime-constraint + * order: + * 1. A pointer to a character string describing the + * runtime-constraint violation. + * 2. A null pointer or a pointer to an implementation defined + * object. + * 3. If the function calling the handler has a return type declared + * as errno_t, the return value of the function is passed. + * Otherwise, a positive value of type errno_t is passed. + * The implementation has a default constraint handler that is used if no + * calls to the set_constraint_handler_s function have been made. The + * behavior of the default handler is implementation-defined, and it may + * cause the program to exit or abort. If the handler argument to + * set_constraint_handler_s is a null pointer, the implementation default + * handler becomes the current constraint handler. 
+ * + * SPECIFIED IN + * ISO/IEC JTC1 SC22 WG14 N1172, Programming languages, environments + * and system software interfaces, Extensions to the C Library, + * Part I: Bounds-checking interfaces + * + * INPUT PARAMETERS + * *msg Pointer to the message describing the error + * + * *ptr Pointer to aassociated data. Can be NULL. + * + * error The error code encountered. + * + * OUTPUT PARAMETERS + * none + * + * RETURN VALUE + * none + * + * ALSO SEE + * set_str_constraint_handler_s() + */ +constraint_handler_t +set_mem_constraint_handler_s (constraint_handler_t handler) +{ + constraint_handler_t prev_handler = mem_handler; + if (NULL == handler) { + mem_handler = sl_default_handler; + } else { + mem_handler = handler; + } + return prev_handler; +} +EXPORT_SYMBOL(set_mem_constraint_handler_s); + + +/** + * NAME + * invoke_safe_mem_constraint_handler + * + * SYNOPSIS + * #include "safe_mem_constraint.h" + * void + * invoke_safe_mem_constraint_handler(const char *msg, + * void *ptr, + * errno_t error) + * + * DESCRIPTION + * Invokes the currently set constraint handler or the default. + * + * INPUT PARAMETERS + * *msg Pointer to the message describing the error + * + * *ptr Pointer to aassociated data. Can be NULL. + * + * error The error code encountered. + * + * OUTPUT PARAMETERS + * none + * + * RETURN VALUE + * none + * + */ +void +invoke_safe_mem_constraint_handler (const char *msg, + void *ptr, + errno_t error) +{ + if (NULL != mem_handler) { + mem_handler(msg, ptr, error); + } else { + sl_default_handler(msg, ptr, error); + } +} diff --git a/casadm/safeclib/safe_mem_constraint.h b/casadm/safeclib/safe_mem_constraint.h new file mode 100644 index 000000000..7ec898e1f --- /dev/null +++ b/casadm/safeclib/safe_mem_constraint.h @@ -0,0 +1,46 @@ +/*------------------------------------------------------------------ + * safe_mem_constraint.h + * + * October 2008, Bo Berry + * + * Copyright (c) 2008, 2009 by Cisco Systems, Inc. + * All rights reserved. 
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *------------------------------------------------------------------
 */

#ifndef __SAFE_MEM_CONSTRAINT_H__
#define __SAFE_MEM_CONSTRAINT_H__

#include "safeclib_private.h"

/*
 * Function used by the libraries to invoke the registered
 * runtime-constraint handler. Always needed.
 * (Defined in safe_mem_constraint.c; falls back to the default
 * handler when none has been registered.)
 */
extern void invoke_safe_mem_constraint_handler(
                const char *msg,
                void *ptr,
                errno_t error);

#endif /* __SAFE_MEM_CONSTRAINT_H__ */
diff --git a/casadm/safeclib/safe_mem_lib.h b/casadm/safeclib/safe_mem_lib.h
new file mode 100644
index 000000000..89aacbc7e
--- /dev/null
+++ b/casadm/safeclib/safe_mem_lib.h
@@ -0,0 +1,57 @@
/*------------------------------------------------------------------
 * safe_mem_lib.h -- Safe C Library Memory APIs
 *
 * October 2008, Bo Berry
 * Modified 2012, Jonathan Toppins
 *
 * Copyright (c) 2008-2012 by Cisco Systems, Inc.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *------------------------------------------------------------------
 */

#ifndef __SAFE_MEM_LIB_H__
#define __SAFE_MEM_LIB_H__

#include "safe_lib.h"

/* Upper bounds accepted by the memory APIs; larger requests are
 * rejected as runtime-constraint violations. The 16/32 variants
 * bound element counts for 2- and 4-byte element operations. */
#define RSIZE_MAX_MEM      ( 256UL << 20 )     /* 256MB */
#define RSIZE_MAX_MEM16    ( RSIZE_MAX_MEM/2 )
#define RSIZE_MAX_MEM32    ( RSIZE_MAX_MEM/4 )

/* set memory constraint handler */
extern constraint_handler_t
set_mem_constraint_handler_s(constraint_handler_t handler);

/* copy memory */
extern errno_t memcpy_s(void *dest, rsize_t dmax,
                        const void *src, rsize_t slen);

/* set bytes
 * NOTE(review): unlike C11 Annex K memset_s(dest, dmax, ch, n), this
 * variant takes no separate byte count -- presumably it fills dmax
 * bytes; confirm against memset_s.c (not in view). */
extern errno_t memset_s(void *dest, rsize_t dmax, uint8_t value);

/* move memory, including overlapping memory */
extern errno_t memmove_s(void *dest, rsize_t dmax,
                         const void *src, rsize_t slen);

#endif /* __SAFE_MEM_LIB_H__ */
diff --git a/casadm/safeclib/safe_str_constraint.c b/casadm/safeclib/safe_str_constraint.c
new file mode 100644
index 000000000..17e7fbbb4
--- /dev/null
+++ b/casadm/safeclib/safe_str_constraint.c
@@ -0,0 +1,146 @@
/*------------------------------------------------------------------
 * safe_str_constraint.c
 *
 * October 2008, Bo Berry
 * 2012, Jonathan Toppins
 *
 * Copyright (c) 2008, 2009, 2012 Cisco Systems
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + *------------------------------------------------------------------ + */ + +#include "safeclib_private.h" +#include "safe_str_constraint.h" +#include "safe_str_lib.h" + + +static constraint_handler_t str_handler = NULL; + + +/** + * NAME + * set_str_constraint_handler_s + * + * SYNOPSIS + * #include "safe_str_lib.h" + * constraint_handler_t + * set_str_constraint_handler_s(constraint_handler_t handler) + * + * DESCRIPTION + * The set_str_constraint_handler_s function sets the runtime-constraint + * handler to be handler. The runtime-constraint handler is the function to + * be called when a library function detects a runtime-constraint + * violation. Only the most recent handler registered with + * set_str_constraint_handler_s is called when a runtime-constraint + * violation occurs. + * When the handler is called, it is passed the following arguments in + * the following order: + * 1. A pointer to a character string describing the + * runtime-constraint violation. + * 2. A null pointer or a pointer to an implementation defined + * object. + * 3. If the function calling the handler has a return type declared + * as errno_t, the return value of the function is passed. + * Otherwise, a positive value of type errno_t is passed. + * The implementation has a default constraint handler that is used if no + * calls to the set_constraint_handler_s function have been made. 
The + * behavior of the default handler is implementation-defined, and it may + * cause the program to exit or abort. If the handler argument to + * set_constraint_handler_s is a null pointer, the implementation default + * handler becomes the current constraint handler. + * + * SPECIFIED IN + * ISO/IEC JTC1 SC22 WG14 N1172, Programming languages, environments + * and system software interfaces, Extensions to the C Library, + * Part I: Bounds-checking interfaces + * + * INPUT PARAMETERS + * *msg Pointer to the message describing the error + * + * *ptr Pointer to aassociated data. Can be NULL. + * + * error The error code encountered. + * + * OUTPUT PARAMETERS + * none + * + * RETURN VALUE + * none + * + * ALSO SEE + * set_str_constraint_handler_s() + */ +constraint_handler_t +set_str_constraint_handler_s (constraint_handler_t handler) +{ + constraint_handler_t prev_handler = str_handler; + if (NULL == handler) { + str_handler = sl_default_handler; + } else { + str_handler = handler; + } + return prev_handler; +} +EXPORT_SYMBOL(set_str_constraint_handler_s); + + +/** + * NAME + * invoke_safe_str_constraint_handler + * + * SYNOPSIS + * #include "safe_str_constraint.h" + * void + * invoke_safe_str_constraint_handler (const char *msg, + * void *ptr, + * errno_t error) + * + * DESCRIPTION + * Invokes the currently set constraint handler or the default. + * + * INPUT PARAMETERS + * *msg Pointer to the message describing the error + * + * *ptr Pointer to aassociated data. Can be NULL. + * + * error The error code encountered. 
+ * + * OUTPUT PARAMETERS + * none + * + * RETURN VALUE + * none + * + */ +void +invoke_safe_str_constraint_handler (const char *msg, + void *ptr, + errno_t error) +{ + if (NULL != str_handler) { + str_handler(msg, ptr, error); + } else { + sl_default_handler(msg, ptr, error); + } +} diff --git a/casadm/safeclib/safe_str_constraint.h b/casadm/safeclib/safe_str_constraint.h new file mode 100644 index 000000000..d0de95d83 --- /dev/null +++ b/casadm/safeclib/safe_str_constraint.h @@ -0,0 +1,64 @@ +/*------------------------------------------------------------------ + * safe_str_constraint.h + * + * October 2008, Bo Berry + * + * Copyright (c) 2008-2011 Cisco Systems + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *------------------------------------------------------------------ + */ + +#ifndef __SAFE_STR_CONSTRAINT_H__ +#define __SAFE_STR_CONSTRAINT_H__ + +#include "safeclib_private.h" + +/* + * Function used by the libraries to invoke the registered + * runtime-constraint handler. Always needed. + */ +extern void invoke_safe_str_constraint_handler( + const char *msg, + void *ptr, + errno_t error); + + +/* + * Safe C Lib internal string routine to consolidate error handling + */ +static inline void handle_error(char *orig_dest, rsize_t orig_dmax, + char *err_msg, errno_t err_code) +{ +#ifdef SAFECLIB_STR_NULL_SLACK + /* null string to eliminate partial copy */ + while (orig_dmax) { *orig_dest = '\0'; orig_dmax--; orig_dest++; } +#else + *orig_dest = '\0'; +#endif + + invoke_safe_str_constraint_handler(err_msg, NULL, err_code); + return; +} + +#endif /* __SAFE_STR_CONSTRAINT_H__ */ diff --git a/casadm/safeclib/safe_str_lib.h b/casadm/safeclib/safe_str_lib.h new file mode 100644 index 000000000..0d5c8efd5 --- /dev/null +++ b/casadm/safeclib/safe_str_lib.h @@ -0,0 +1,71 @@ +/*------------------------------------------------------------------ + * safe_str_lib.h -- Safe C Library String APIs + * + * October 2008, Bo Berry + * + * Copyright (c) 2008-2011, 2013 by Cisco Systems, Inc. + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *------------------------------------------------------------------
 */

#ifndef __SAFE_STR_LIB_H__
#define __SAFE_STR_LIB_H__

#include "safe_lib.h"

/*
 * The shortest string is a null string!!
 */
#define RSIZE_MIN_STR      ( 1 )

/* maximum string length */
#define RSIZE_MAX_STR      ( 4UL << 10 )      /* 4KB */


/* The makeup of a password */
#define SAFE_STR_MIN_LOWERCASE     ( 2 )
#define SAFE_STR_MIN_UPPERCASE     ( 2 )
#define SAFE_STR_MIN_NUMBERS       ( 1 )
#define SAFE_STR_MIN_SPECIALS      ( 1 )

#define SAFE_STR_PASSWORD_MIN_LENGTH   ( 6 )
#define SAFE_STR_PASSWORD_MAX_LENGTH   ( 32 )

/* set string constraint handler */
extern constraint_handler_t
set_str_constraint_handler_s(constraint_handler_t handler);

/* fitted string copy */
extern errno_t
strncpy_s(char *dest, rsize_t dmax, const char *src, rsize_t slen);

/* string length */
extern rsize_t
strnlen_s (const char *s, rsize_t smax);

/* string tokenizer */
extern char *
strtok_s(char *s1, rsize_t *s1max, const char *src, char **ptr);

#endif /* __SAFE_STR_LIB_H__ */
diff --git a/casadm/safeclib/safe_types.h b/casadm/safeclib/safe_types.h
new file mode 100644
index 000000000..057063fae
--- /dev/null
+++ b/casadm/safeclib/safe_types.h
@@ -0,0 +1,59 @@
/*------------------------------------------------------------------
 * safe_types.h - C99 std types & defs or Linux kernel equivalents
 *
 * March 2007, Bo Berry
 * Modified 2012, Jonathan Toppins
 *
 * Copyright (c) 2007-2013 by Cisco Systems, Inc
+ * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *------------------------------------------------------------------ + */ + +#ifndef __SAFE_TYPES_H__ +#define __SAFE_TYPES_H__ + +#ifdef __KERNEL__ +/* linux kernel environment */ + +#include +#include +#include + +/* errno_t isn't defined in the kernel */ +typedef int errno_t; + +#else + +#include +#include +#include +#include +#include + +typedef int errno_t; + +#include + +#endif /* __KERNEL__ */ +#endif /* __SAFE_TYPES_H__ */ diff --git a/casadm/safeclib/safeclib_private.h b/casadm/safeclib/safeclib_private.h new file mode 100644 index 000000000..7280e879a --- /dev/null +++ b/casadm/safeclib/safeclib_private.h @@ -0,0 +1,93 @@ +/*------------------------------------------------------------------ + * safeclib_private.h - Internal library references + * + * 2012, Jonathan Toppins + * + * Copyright (c) 2012, 2013 by Cisco Systems, Inc + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *------------------------------------------------------------------ + */ + +#ifndef __SAFECLIB_PRIVATE_H__ +#define __SAFECLIB_PRIVATE_H__ + +#ifdef __KERNEL__ +/* linux kernel environment */ + +#include +#include +#include + +#define RCNEGATE(x) ( -(x) ) + +#define slprintf(...) printk(KERN_EMERG __VA_ARGS__) +#define slabort() +#ifdef DEBUG +#define sldebug_printf(...) printk(KERN_DEBUG __VA_ARGS__) +#endif + +#else /* !__KERNEL__ */ + +#if HAVE_CONFIG_H +#include "config.h" +#endif + +#include +#ifdef STDC_HEADERS +# include +# include +# include +#else +# ifdef HAVE_STDLIB_H +# include +# endif +#endif +#ifdef HAVE_STRING_H +# if !defined STDC_HEADERS && defined HAVE_MEMORY_H +# include +# endif +# include +#endif +#ifdef HAVE_LIMITS_H +# include +#endif + +#define EXPORT_SYMBOL(sym) +#define RCNEGATE(x) (x) + +#define slprintf(...) fprintf(stderr, __VA_ARGS__) +#define slabort() abort() +#ifdef DEBUG +#define sldebug_printf(...) printf(__VA_ARGS__) +#endif + +#endif /* __KERNEL__ */ + +#ifndef sldebug_printf +#define sldebug_printf(...) +#endif + +#include "safe_lib.h" + +#endif /* __SAFECLIB_PRIVATE_H__ */ diff --git a/casadm/safeclib/strncpy_s.c b/casadm/safeclib/strncpy_s.c new file mode 100644 index 000000000..e1cdd6b17 --- /dev/null +++ b/casadm/safeclib/strncpy_s.c @@ -0,0 +1,238 @@ +/*------------------------------------------------------------------ + * strncpy_s.c + * + * October 2008, Bo Berry + * + * Copyright (c) 2008-2011 by Cisco Systems, Inc + * All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + *------------------------------------------------------------------ + */ + +#include "safeclib_private.h" +#include "safe_str_constraint.h" +#include "safe_str_lib.h" + + +/* + * NAME + * strncpy_s + * + * SYNOPSIS + * #include "safe_str_lib.h" + * errno_t + * strncpy_s(char *dest, rsize_t dmax, const char *src, rsize_t slen) + * + * DESCRIPTION + * The strncpy_s function copies not more than slen successive characters + * (characters that follow a null character are not copied) from the + * array pointed to by src to the array pointed to by dest. If no null + * character was copied from src, then dest[n] is set to a null character. + * + * All elements following the terminating null character (if any) + * written by strncpy_s in the array of dmax characters pointed to + * by dest take on the null value when strncpy_s returns. 
+ * + * Specicified in: + * ISO/IEC TR 24731-1, Programming languages, environments + * and system software interfaces, Extensions to the C Library, + * Part I: Bounds-checking interfaces + * + * INPUT PARAMETERS + * dest pointer to string that will be replaced by src. + * The resulting string is null terminated. + * + * dmax restricted maximum length of the resulting dest, + * including the null + * + * src pointer to the string that will be copied + * to string dest + * + * slen the maximum number of characters to copy from src + * + * OUTPUT PARAMETERS + * dest updated with src string + * + * RUNTIME CONSTRAINTS + * Neither dmax nor slen shall be equal to zero. + * Neither dmax nor slen shall be equal zero. + * Neither dmax nor slen shall be greater than RSIZE_MAX_STR. + * If slen is either greater than or equal to dmax, then dmax + * should be more than strnlen_s(src,dmax) + * Copying shall not take place between objects that overlap. + * If there is a runtime-constraint violation, then if dest + * is not a null pointer and dmax greater than RSIZE_MAX_STR, + * then strncpy_s nulls dest. + * + * RETURN VALUE + * EOK successful operation, the characters in src were copied + * to dest and the result is null terminated. 
+ * ESNULLP NULL pointer + * ESZEROL zero length + * ESLEMAX length exceeds max limit + * ESOVRLP strings overlap + * ESNOSPC not enough space to copy src + * + * ALSO SEE + * strcat_s(), strncat_s(), strcpy_s() + *- + */ +errno_t +strncpy_s (char *dest, rsize_t dmax, const char *src, rsize_t slen) +{ + rsize_t orig_dmax; + char *orig_dest; + const char *overlap_bumper; + + if (dest == NULL) { + invoke_safe_str_constraint_handler("strncpy_s: dest is null", + NULL, ESNULLP); + return RCNEGATE(ESNULLP); + } + + if (dmax == 0) { + invoke_safe_str_constraint_handler("strncpy_s: dmax is 0", + NULL, ESZEROL); + return RCNEGATE(ESZEROL); + } + + if (dmax > RSIZE_MAX_STR) { + invoke_safe_str_constraint_handler("strncpy_s: dmax exceeds max", + NULL, ESLEMAX); + return RCNEGATE(ESLEMAX); + } + + /* hold base in case src was not copied */ + orig_dmax = dmax; + orig_dest = dest; + + if (src == NULL) { + handle_error(orig_dest, orig_dmax, "strncpy_s: " + "src is null", + ESNULLP); + return RCNEGATE(ESNULLP); + } + + if (slen == 0) { + handle_error(orig_dest, orig_dmax, "strncpy_s: " + "slen is zero", + ESZEROL); + return RCNEGATE(ESZEROL); + } + + if (slen > RSIZE_MAX_STR) { + handle_error(orig_dest, orig_dmax, "strncpy_s: " + "slen exceeds max", + ESLEMAX); + return RCNEGATE(ESLEMAX); + } + + + if (dest < src) { + overlap_bumper = src; + + while (dmax > 0) { + if (dest == overlap_bumper) { + handle_error(orig_dest, orig_dmax, "strncpy_s: " + "overlapping objects", + ESOVRLP); + return RCNEGATE(ESOVRLP); + } + + if (slen == 0) { + /* + * Copying truncated to slen chars. Note that the TR says to + * copy slen chars plus the null char. We null the slack. 
+ */ +#ifdef SAFECLIB_STR_NULL_SLACK + while (dmax) { *dest = '\0'; dmax--; dest++; } +#else + *dest = '\0'; +#endif + return RCNEGATE(EOK); + } + + *dest = *src; + if (*dest == '\0') { +#ifdef SAFECLIB_STR_NULL_SLACK + /* null slack */ + while (dmax) { *dest = '\0'; dmax--; dest++; } +#endif + return RCNEGATE(EOK); + } + + dmax--; + slen--; + dest++; + src++; + } + + } else { + overlap_bumper = dest; + + while (dmax > 0) { + if (src == overlap_bumper) { + handle_error(orig_dest, orig_dmax, "strncpy_s: " + "overlapping objects", + ESOVRLP); + return RCNEGATE(ESOVRLP); + } + + if (slen == 0) { + /* + * Copying truncated to slen chars. Note that the TR says to + * copy slen chars plus the null char. We null the slack. + */ +#ifdef SAFECLIB_STR_NULL_SLACK + while (dmax) { *dest = '\0'; dmax--; dest++; } +#else + *dest = '\0'; +#endif + return RCNEGATE(EOK); + } + + *dest = *src; + if (*dest == '\0') { +#ifdef SAFECLIB_STR_NULL_SLACK + /* null slack */ + while (dmax) { *dest = '\0'; dmax--; dest++; } +#endif + return RCNEGATE(EOK); + } + + dmax--; + slen--; + dest++; + src++; + } + } + + /* + * the entire src was not copied, so zero the string + */ + handle_error(orig_dest, orig_dmax, "strncpy_s: not enough " + "space for src", + ESNOSPC); + return RCNEGATE(ESNOSPC); +} +EXPORT_SYMBOL(strncpy_s); diff --git a/casadm/safeclib/strnlen_s.c b/casadm/safeclib/strnlen_s.c new file mode 100644 index 000000000..3df9d26e0 --- /dev/null +++ b/casadm/safeclib/strnlen_s.c @@ -0,0 +1,117 @@ +/*------------------------------------------------------------------ + * strnlen_s.c + * + * October 2008, Bo Berry + * + * Copyright (c) 2008-2011 by Cisco Systems, Inc + * All rights reserved. 
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *------------------------------------------------------------------
 */

#include "safeclib_private.h"
#include "safe_str_constraint.h"
#include "safe_str_lib.h"


/**
 * NAME
 *    strnlen_s
 *
 * SYNOPSIS
 *    #include "safe_str_lib.h"
 *    rsize_t
 *    strnlen_s(const char *dest, rsize_t dmax)
 *
 * DESCRIPTION
 *    The strnlen_s function computes the length of the string pointed
 *    to by dest.
 *
 * SPECIFIED IN
 *    ISO/IEC TR 24731-1, Programming languages, environments
 *    and system software interfaces, Extensions to the C Library,
 *    Part I: Bounds-checking interfaces
 *
 * INPUT PARAMETERS
 *    dest      pointer to string
 *
 *    dmax      restricted maximum length (including null character).
 *
 * OUTPUT PARAMETERS
 *    none
 *
 * RUNTIME CONSTRAINTS
 *    dest shall not be a null pointer
 *    dmax shall not be greater than RSIZE_MAX_STR
 *    dmax shall not equal zero
 *    null character shall be in first dmax characters of dest
 *
 * RETURN VALUE
 *    The function returns the string length, excluding the terminating
 *    null character.  If dest is NULL, then strnlen_s returns 0.
 *
 *    Otherwise, the strnlen_s function returns the number of characters
 *    that precede the terminating null character.
 *    At most the first dmax characters of dest are accessed by strnlen_s.
 *
 *    NOTE(review): this implementation deviates from TR 24731-1, which
 *    returns dmax when no null is found in the first dmax characters;
 *    here that case reports a constraint violation and returns 0 (see
 *    the count == orig_dmax check below).  Presumably deliberate
 *    hardening against unterminated strings -- confirm callers expect
 *    a 0 return in that case.
 *
 * ALSO SEE
 *    strnterminate_s()
 *
 */
rsize_t
strnlen_s (const char *dest, rsize_t dmax)
{
    rsize_t count;
    /* remember the caller's limit so the unterminated case can be
     * detected after the scan */
    rsize_t orig_dmax = dmax;

    if (dest == NULL) {
        return RCNEGATE(0);
    }

    if (dmax == 0) {
        invoke_safe_str_constraint_handler("strnlen_s: dmax is 0",
                     NULL, ESZEROL);
        return RCNEGATE(0);
    }

    if (dmax > RSIZE_MAX_STR) {
        invoke_safe_str_constraint_handler("strnlen_s: dmax exceeds max",
                     NULL, ESLEMAX);
        return RCNEGATE(0);
    }

    count = 0;
    while (*dest && dmax) {
        count++;
        dmax--;
        dest++;
    }
    /* scanned dmax chars without hitting a null: treat the string as
     * unterminated rather than returning dmax (see NOTE above) */
    if (count == orig_dmax) {
        invoke_safe_str_constraint_handler("strnlen_s: string length exceeds dmax",
                     NULL, ESLEMAX);
        return RCNEGATE(0);
    }

    return RCNEGATE(count);
}
EXPORT_SYMBOL(strnlen_s);
diff --git a/casadm/safeclib/strtok_s.c b/casadm/safeclib/strtok_s.c
new file mode 100644
index 000000000..97b89fe16
--- /dev/null
+++ b/casadm/safeclib/strtok_s.c
@@ -0,0 +1,323 @@
/*------------------------------------------------------------------
 * strtok_s.c
 *
 * October 2008, Bo Berry
 *
 * Copyright (c) 2008-2011 by Cisco Systems, Inc
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *------------------------------------------------------------------
 */

#include "safeclib_private.h"
#include "safe_str_constraint.h"
#include "safe_str_lib.h"


/**
 * NAME
 *    strtok_s
 *
 * SYNOPSIS
 *    #include "safe_str_lib.h"
 *    char *strtok_s(char *dest, rsize_t *dmax, const char *src, char **ptr)
 *
 * DESCRIPTION
 *    Bounds-checked tokenizer (TR 24731-1). A sequence of calls
 *    breaks the string pointed to by dest into tokens, each
 *    delimited by a character from the string pointed to by src.
 *    ptr points to a caller-provided char pointer in which the
 *    function stores the position where scanning resumes.
 *
 *    The first call passes a non-null dest, with *dmax holding the
 *    number of elements of dest; it stores the resume position in
 *    *ptr and updates *dmax to the number of elements remaining.
 *    Subsequent calls pass a null dest, with *dmax and *ptr carrying
 *    the values stored by the previous call; the delimiter set src
 *    may differ from call to call.
 *
 *    The token found is terminated in place: the delimiter that ends
 *    it is overwritten with a null character.
 *
 * INPUT PARAMETERS
 *    dest    string to tokenize, or NULL to continue a sequence
 *    dmax    in/out: restricted maximum length of dest
 *    src     delimiter string (at most STRTOK_DELIM_MAX_LEN chars)
 *    ptr     in/out: tokenizer resume position
 *
 * RUNTIME CONSTRAINTS
 *    src, ptr and dmax shall not be null pointers; *dmax shall not
 *    be 0 nor greater than RSIZE_MAX_STR; if dest is NULL then *ptr
 *    shall not be NULL; dest must be null-terminated within *dmax.
 *
 * RETURN VALUE
 *    Pointer to the first character of the token, or NULL if there
 *    is no token or a runtime-constraint violation occurred
 *    (ESNULLP, ESZEROL, ESLEMAX, ESUNTERM).
 *
 * EXAMPLE
 *    str1 = ",.:*one,two;three", str2 = ",.;*":
 *    strtok_s(str1, &len, str2, &p2str) returns "one";
 *    strtok_s(NULL, &len, str2, &p2str) returns "two"; and so on.
 */
char *
strtok_s(char *dest, rsize_t *dmax, const char *src, char **ptr)
{

/*
 * CONFIGURE: The spec does not call out a maximum for the src
 * string, so one is defined here. Delimiter strings that are not
 * terminated within this many characters trigger ESUNTERM.
 */
#define STRTOK_DELIM_MAX_LEN ( 16 )


    const char *pt;     /* cursor over the delimiter set */
    char *ptoken;       /* candidate token start, NULL until found */
    rsize_t dlen;       /* remaining budget for dest */
    rsize_t slen;       /* remaining budget for src */

    if (dmax == NULL) {
        invoke_safe_str_constraint_handler("strtok_s: dmax is NULL",
                    NULL, ESNULLP);
        return (NULL);
    }

    if (*dmax == 0) {
        invoke_safe_str_constraint_handler("strtok_s: dmax is 0",
                    NULL, ESZEROL);
        return (NULL);
    }

    if (*dmax > RSIZE_MAX_STR) {
        invoke_safe_str_constraint_handler("strtok_s: dmax exceeds max",
                    NULL, ESLEMAX);
        return (NULL);
    }

    if (src == NULL) {
        invoke_safe_str_constraint_handler("strtok_s: src is null",
                    NULL, ESNULLP);
        return (NULL);
    }

    if (ptr == NULL) {
        invoke_safe_str_constraint_handler("strtok_s: ptr is null",
                    NULL, ESNULLP);
        return (NULL);
    }

    /* if the source was NULL, use the tokenizer context */
    if (dest == NULL) {
        dest = *ptr;
    }

    /*
     * Scan dest for the first character that is NOT a delimiter:
     * that character is the token start. ptoken is latched to the
     * current position when the full delimiter set is scanned
     * without a match, and reset to NULL when a match is found.
     */
    dlen = *dmax;
    ptoken = NULL;
    while (*dest != '\0' && !ptoken) {

        if (dlen == 0) {
            *ptr = NULL;
            invoke_safe_str_constraint_handler(
                    "strtok_s: dest is unterminated",
                    NULL, ESUNTERM);
            return (NULL);
        }

        /*
         * must scan the entire delimiter list
         * ISO should have included a delimiter string limit!!
         */
        slen = STRTOK_DELIM_MAX_LEN;
        pt = src;
        while (*pt != '\0') {

            if (slen == 0) {
                *ptr = NULL;
                invoke_safe_str_constraint_handler(
                        "strtok_s: src is unterminated",
                        NULL, ESUNTERM);
                return (NULL);
            }
            slen--;

            if (*dest == *pt) {
                /* current char is a delimiter — keep skipping */
                ptoken = NULL;
                break;
            } else {
                pt++;
                ptoken = dest;
            }
        }
        /* note: dest is advanced even when ptoken was just latched,
         * so after the loop dest points one past the token start */
        dest++;
        dlen--;
    }

    /*
     * if the beginning of a token was not found, then no
     * need to continue the scan.
     */
    if (ptoken == NULL) {
        *dmax = dlen;
        return (ptoken);
    }

    /*
     * Now we need to locate the end of the token.
     * NOTE(review): if the token extends to the end of dest, *ptr is
     * left unchanged on return — confirm callers always stop when the
     * updated *dmax reaches 0 in that case.
     */
    while (*dest != '\0') {

        if (dlen == 0) {
            *ptr = NULL;
            invoke_safe_str_constraint_handler(
                    "strtok_s: dest is unterminated",
                    NULL, ESUNTERM);
            return (NULL);
        }

        slen = STRTOK_DELIM_MAX_LEN;
        pt = src;
        while (*pt != '\0') {

            if (slen == 0) {
                *ptr = NULL;
                invoke_safe_str_constraint_handler(
                        "strtok_s: src is unterminated",
                        NULL, ESUNTERM);
                return (NULL);
            }
            slen--;

            if (*dest == *pt) {
                /*
                 * found a delimiter, set to null
                 * and return context ptr to next char
                 */
                *dest = '\0';
                *ptr = (dest + 1);  /* return pointer for next scan */
                *dmax = dlen - 1;   /* account for the nulled delimiter */
                return (ptoken);
            } else {
                /*
                 * simply scanning through the delimiter string
                 */
                pt++;
            }
        }
        dest++;
        dlen--;
    }

    *dmax = dlen;
    return (ptoken);
}

/*
* Copyright(c) 2012-2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/

/* NOTE(review): the system header names below were lost in extraction
 * (angle-bracket contents stripped) — restore them from the original
 * casadm/statistics_model.c before building. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "cas_lib.h"
#include "extended_err_msg.h"
#include "cas_lib_utils.h"
#include
#include
#include

#include "csvparse.h"
#include "statistics_view.h"
#include "safeclib/safe_str_lib.h"
#include "ocf/ocf_cache.h"

/* Partition id of the built-in "unclassified" IO class. */
#define IOCLASS_UNCLASSIFIED (0)

/* Unit labels used in the statistics tables. */
#define UNIT_REQUESTS "Requests"
#define UNIT_BLOCKS "4KiB blocks"

#define ALLOWED_NUMBER_OF_ATTEMPTS 10

/*
 * Return numerator/denominator as a percentage (0.0 when the
 * denominator is zero, avoiding division by zero).
 */
static inline float percentage(uint64_t numerator, uint64_t denominator)
{
    float result;
    if (denominator) {
        result = 100.0 * numerator / denominator;
    } else {
        result = 0;
    }
    return result;
}

/*
 * Convert a count of cache lines into a count of 4KiB blocks.
 * cache_line_size is expected in KiB (callers pass
 * info.cache_line_size / KiB), so each line is (size_KiB / 4) blocks.
 */
static inline long unsigned int cache_line_in_4k(uint64_t size,
        ocf_cache_line_size_t cache_line_size)
{
    long unsigned int result;

    result = size * (cache_line_size / 4);

    return result;
}

/* Convert a byte count to 4KiB blocks, rounding up. */
static inline unsigned long bytes_to_4k(uint64_t size)
{
    return (size + 4095UL) >> 12;
}

/* Convert a count of 4KiB blocks to GiB (float, for display only). */
static float calc_gb(uint32_t clines)
{
    return (float) clines * 4 * KiB / GiB;
}

/*
 * Print a dirty-time value: raw seconds first, then a human-readable
 * days/hours/minutes/seconds breakdown (or "Cache clean" when t == 0).
 * No trailing newline — the caller finishes the line.
 */
static void print_dirty_for_time(uint32_t t, FILE *outfile)
{
    uint32_t d, h, m, s;

    fprintf(outfile, "%u,[s],", t);

    if (!t) {
        fprintf(outfile, "Cache clean");
        return;
    }

    d = t / (24 * 3600);
    h = (t % (24 * 3600)) / 3600;
    m = (t % 3600) / 60;
    s = (t % 60);

    /* only non-zero components are printed */
    if (d) {
        fprintf(outfile, "%u [d] ", d);
    }
    if (h) {
        fprintf(outfile, "%u [h] ", h);
    }
    if (m) {
        fprintf(outfile, "%u [m] ", m);
    }
    if (s) {
        fprintf(outfile, "%u [s] ", s);
    }
}

/*
 * Print one tagged key/value CSV line: TAG(KV_PAIR)"key",value\n.
 * The value is produced from the printf-style fmt/varargs.
 */
__attribute__((format(printf, 3, 4)))
static void print_kv_pair(FILE *outfile, const char *title, const char *fmt, ...)
{
    va_list ap;

    fprintf(outfile, TAG(KV_PAIR) "\"%s\",", title);
    va_start(ap, fmt);
    vfprintf(outfile, fmt, ap);
    va_end(ap);
    fprintf(outfile, "\n");
}

/* Like print_kv_pair(), but the value is a dirty-time (see above). */
static void print_kv_pair_time(FILE *outfile, const char *title, uint32_t time)
{
    fprintf(outfile, TAG(KV_PAIR) "\"%s\",", title);
    print_dirty_for_time(time, outfile);
    fprintf(outfile, "\n");
}

/* Emit a record separator line for the statistics view parser. */
static void begin_record(FILE *outfile)
{
    fprintf(outfile, TAG(RECORD) "\n");
}

/*
 * Print a table header line with ncols quoted column titles,
 * comma-separated, taken from the varargs.
 */
static void print_table_header(FILE *outfile, uint32_t ncols, ...)
{
    va_list ap;
    const char *s;

    fprintf(outfile, TAG(TABLE_HEADER));
    va_start(ap, ncols);
    while (ncols--) {
        s = va_arg(ap, const char *);
        fprintf(outfile, "\"%s\"%s", s, ncols ?
                "," : "\n");
    }
    va_end(ap);
}

/*
 * Print one value+percentage table line:
 * tag"title",value,percent[,"[unit]"]\n — value comes from fmt/ap.
 */
static void print_val_perc_table_elem(FILE *outfile, const char *tag,
        const char *title, const char *unit,
        float percent, const char * fmt,
        va_list ap)
{
    fprintf(outfile, "%s\"%s\",", tag, title);
    vfprintf(outfile, fmt, ap);
    fprintf(outfile, ",%.1f", percent);
    if (unit) {
        fprintf(outfile, ",\"[%s]\"", unit);
    }
    fprintf(outfile, "\n");
}

/* Table-row variant of print_val_perc_table_elem(). */
__attribute__((format(printf, 5, 6)))
static inline void print_val_perc_table_row(FILE *outfile, const char *title,
        const char *unit, float percent,
        const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    print_val_perc_table_elem(outfile, TAG(TABLE_ROW), title, unit,
            percent, fmt, ap);
    va_end(ap);
}

/* Table-section variant of print_val_perc_table_elem(). */
__attribute__((format(printf, 5, 6)))
static inline void print_val_perc_table_section(FILE *outfile, const char *title,
        const char *unit, float percent,
        const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    print_val_perc_table_elem(outfile, TAG(TABLE_SECTION), title, unit,
            percent, fmt, ap);
    va_end(ap);
}

/*
 * Join two words into "s1 s2" for row titles.
 * NOTE: returns a static buffer — not reentrant, and the result is
 * only valid until the next call (fine here: single-threaded CLI,
 * result consumed immediately by the print helpers).
 */
static inline const char *make_row_title(const char *s1, const char *s2)
{
    static char buffer[64];
    snprintf(buffer, sizeof(buffer), "%s %s", s1, s2);
    return buffer;
}

/*
 * Print the configuration section for one core device:
 * id, paths, size, dirty time, state and sequential-cutoff settings.
 */
static void print_core_conf(const struct kcas_core_info *info,
        uint32_t cache_size, FILE *outfile,
        ocf_cache_line_size_t cache_line_size)
{
    uint64_t core_size;
    float core_size_gb;

    /* core size in 4KiB blocks (bytes / 1024 / 4) */
    core_size = info->stats.core_size_bytes / KiB / 4;
    core_size_gb = calc_gb(core_size);

    print_kv_pair(outfile, "Core Id", "%i", info->core_id);
    print_kv_pair(outfile, "Core Device", "%s",
            info->core_path_name);
    print_kv_pair(outfile, "Exported Object", "/dev/cas%d-%d",
            info->cache_id, info->core_id);
    print_kv_pair(outfile, "Core Size", "%lu, [4KiB Blocks], %.2f, [GiB]",
            core_size, core_size_gb);
    print_kv_pair_time(outfile, "Dirty for", info->stats.dirty_for);

    print_kv_pair(outfile, "Status", "%s",
            get_core_state_name(info->state));

    print_kv_pair(outfile, "Seq cutoff threshold", "%llu, [KiB]",
            info->stats.seq_cutoff_threshold / KiB);

    print_kv_pair(outfile, "Seq cutoff policy", "%s",
            seq_cutoff_policy_to_name(info->stats.seq_cutoff_policy));
}

/* Emit the common "Usage statistics" 4-column table header. */
static void print_usage_header(FILE* outfile)
{
    print_table_header(outfile, 4, "Usage statistics", "Count",
            "%", "[Units]");
}

/*
 * Print the usage table for one core: occupancy, free, clean and
 * dirty lines, in 4KiB blocks with percentages. Occupancy/free are
 * relative to the whole cache; clean/dirty to this core's occupancy.
 */
static void print_core_usage(const struct ocf_stats_core* exp_obj_stats,
        uint32_t cache_size, uint32_t cache_occupancy,
        FILE* outfile, ocf_cache_line_size_t line_size)
{
    print_usage_header(outfile);

    print_val_perc_table_row(outfile, "Occupancy", UNIT_BLOCKS,
            percentage(exp_obj_stats->cache_occupancy,
                    cache_size),
            "%lu",
            cache_line_in_4k(exp_obj_stats->cache_occupancy,
                    line_size));
    /* "Free" is cache-global free space, not per-core */
    print_val_perc_table_row(outfile, "Free", UNIT_BLOCKS,
            percentage(cache_size - cache_occupancy, cache_size),
            "%lu",
            cache_line_in_4k(cache_size - cache_occupancy,
                    line_size));
    print_val_perc_table_row(outfile, "Clean", UNIT_BLOCKS,
            percentage(exp_obj_stats->cache_occupancy - exp_obj_stats->dirty,
                    exp_obj_stats->cache_occupancy),
            "%lu",
            cache_line_in_4k(exp_obj_stats->cache_occupancy - exp_obj_stats->dirty,
                    line_size));
    print_val_perc_table_row(outfile, "Dirty", UNIT_BLOCKS,
            percentage(exp_obj_stats->dirty,
                    exp_obj_stats->cache_occupancy),
            "%lu",
            cache_line_in_4k(exp_obj_stats->dirty,
                    line_size));
}

/*
 * Print one request-stats section (hits / partial misses / full
 * misses / total) for a single direction (op_name = "Read"/"Write").
 * Percentages are relative to total_reqs (all requests, both
 * directions, including pass-through).
 */
static void print_req_section(const struct ocf_stats_req *stats, const char *op_name,
        FILE *outfile, uint64_t total_reqs)
{
    uint64_t cache_hits;
    float percent;

    /* hits = everything that was neither a full nor a partial miss */
    cache_hits = stats->total - (stats->full_miss + stats->partial_miss);

    percent = percentage(cache_hits, total_reqs);
    print_val_perc_table_section(outfile, make_row_title(op_name, "hits"),
            UNIT_REQUESTS, percent, "%lu", cache_hits);

    percent = percentage(stats->partial_miss, total_reqs);
    print_val_perc_table_row(outfile, make_row_title(op_name, "partial misses"),
            UNIT_REQUESTS, percent, "%lu",
            stats->partial_miss);

    percent = percentage(stats->full_miss, total_reqs);
    print_val_perc_table_row(outfile, make_row_title(op_name, "full misses"),
            UNIT_REQUESTS, percent, "%lu", stats->full_miss);

    percent = percentage(stats->total, total_reqs);
    print_val_perc_table_row(outfile, make_row_title(op_name, "total"),
            UNIT_REQUESTS, percent, "%lu", stats->total);
}

/*
 * Print the full request-statistics table for a core: read section,
 * write section, pass-through lines and a serviced/total summary.
 * total_reqs counts serviced plus pass-through requests and is the
 * denominator for every percentage in the table.
 */
static void print_req_stats(const struct ocf_stats_core *exp_obj_stats,
        FILE *outfile)
{
    const struct ocf_stats_req *req_stats;
    float percent;
    uint64_t total_reqs = 0, serv_reqs = 0;

    print_table_header(outfile, 4, "Request statistics", "Count",
            "%", "[Units]");

    total_reqs += exp_obj_stats->read_reqs.total;
    total_reqs += exp_obj_stats->write_reqs.total;

    /* serviced = handled by the cache engine (no pass-through) */
    serv_reqs = total_reqs;

    total_reqs += exp_obj_stats->read_reqs.pass_through;
    total_reqs += exp_obj_stats->write_reqs.pass_through;

    /* Section for reads. */
    req_stats = &exp_obj_stats->read_reqs;
    print_req_section(req_stats, "Read", outfile, total_reqs);

    /* Section for writes. */
    req_stats = &exp_obj_stats->write_reqs;
    print_req_section(req_stats, "Write", outfile, total_reqs);

    /* Pass-Through requests. */
    percent = percentage(exp_obj_stats->read_reqs.pass_through, total_reqs);
    print_val_perc_table_section(outfile, "Pass-Through reads", UNIT_REQUESTS,
            percent, "%lu",
            exp_obj_stats->read_reqs.pass_through);

    percent = percentage(exp_obj_stats->write_reqs.pass_through, total_reqs);
    print_val_perc_table_row(outfile, "Pass-Through writes", UNIT_REQUESTS,
            percent, "%lu",
            exp_obj_stats->write_reqs.pass_through);

    /* Summary. */
    percent = percentage(serv_reqs, total_reqs);
    print_val_perc_table_row(outfile, "Serviced requests", UNIT_REQUESTS,
            percent, "%lu", serv_reqs);

    print_val_perc_table_section(outfile, "Total requests", UNIT_REQUESTS,
            total_reqs ?
            100.0f : 0.0f, "%lu",
            total_reqs);
}

/*
 * Print one block-stats section (reads from / writes to / total) for
 * a single device. stats_4k must already be converted to 4KiB blocks.
 * cache_line_size is currently unused here (kept for signature
 * symmetry with the other section printers).
 */
static void print_block_section(const struct ocf_stats_block *stats_4k,
        const char *dev_name, FILE *outfile,
        ocf_cache_line_size_t cache_line_size)
{
    uint64_t total_4k;
    float percent;

    total_4k = stats_4k->read + stats_4k->write;

    percent = percentage(stats_4k->read, total_4k);
    print_val_perc_table_section(outfile,
            make_row_title("Reads from", dev_name),
            UNIT_BLOCKS, percent, "%lu", stats_4k->read);

    percent = percentage(stats_4k->write, total_4k);
    print_val_perc_table_row(outfile,
            make_row_title("Writes to", dev_name),
            UNIT_BLOCKS, percent, "%lu", stats_4k->write);

    print_val_perc_table_row(outfile,
            make_row_title("Total to/from", dev_name),
            UNIT_BLOCKS, total_4k ? 100.0f : 0.0f, "%lu",
            total_4k);
}

/* Convert a byte-based block-stats struct to 4KiB-block units. */
static struct ocf_stats_block convert_block_stats_to_4k(
        const struct ocf_stats_block *stats)
{
    struct ocf_stats_block stats_4k;
    stats_4k.read = bytes_to_4k(stats->read);
    stats_4k.write = bytes_to_4k(stats->write);
    return stats_4k;
}

/*
 * Print the block-statistics table for a core: traffic to/from the
 * core volume, the cache volume, and the exported object, all in
 * 4KiB blocks.
 */
void print_block_stats(const struct ocf_stats_core *exp_obj_stats,
        FILE *outfile, ocf_cache_line_size_t cache_line_size)
{
    struct ocf_stats_block cache_volume_stats_4k =
            convert_block_stats_to_4k(&exp_obj_stats->cache_volume);
    struct ocf_stats_block core_volume_stats_4k =
            convert_block_stats_to_4k(&exp_obj_stats->core_volume);
    struct ocf_stats_block core_stats_4k =
            convert_block_stats_to_4k(&exp_obj_stats->core);

    print_table_header(outfile, 4, "Block statistics", "Count",
            "%", "[Units]");

    print_block_section(&core_volume_stats_4k, "core", outfile,
            cache_line_size);
    print_block_section(&cache_volume_stats_4k, "cache", outfile,
            cache_line_size);
    print_block_section(&core_stats_4k, "exported object", outfile,
            cache_line_size);
}

/*
 * Print one error-stats section (read / write / total errors) for a
 * device ("Cache" or "Core").
 */
static void print_error_section(const struct ocf_stats_error *stats,
        const char *section_name, FILE *outfile)
{
    uint64_t total = 0;
    float percent;

    total = stats->read +
            stats->write;

    percent = percentage(stats->read, total);
    print_val_perc_table_section(outfile,
            make_row_title(section_name , "read errors"),
            UNIT_REQUESTS, percent, "%u", stats->read);
    percent = percentage(stats->write, total);
    print_val_perc_table_row(outfile,
            make_row_title(section_name, "write errors"),
            UNIT_REQUESTS, percent, "%u", stats->write);
    /* NOTE(review): "%lu" for a uint64_t total is not portable on
     * 32-bit targets — PRIu64 would be; same pattern file-wide. */
    print_val_perc_table_row(outfile,
            make_row_title(section_name, "total errors"),
            UNIT_REQUESTS, total ? 100.0f : 0.0f, "%lu", total);
}

/* Print the grand-total error line across cache and core devices. */
static void print_error_stats_total(const struct ocf_stats_error *cache_stats,
        const struct ocf_stats_error *core_stats,
        FILE *outfile)
{
    uint64_t total;

    total = cache_stats->read + cache_stats->write +
            core_stats->read + core_stats->write;

    print_val_perc_table_section(outfile, "Total errors", UNIT_REQUESTS,
            total ? 100.0f : 0.0f, "%lu", total);
}

/* Print the error-statistics table: cache section, core section, total. */
static void print_error_stats(const struct ocf_stats_core *exp_obj_stats,
        FILE *outfile)
{
    print_table_header(outfile, 4, "Error statistics", "Count", "%", "[Units]");

    print_error_section(&exp_obj_stats->cache_errors, "Cache", outfile);
    print_error_section(&exp_obj_stats->core_errors, "Core", outfile);

    print_error_stats_total(&exp_obj_stats->cache_errors,
            &exp_obj_stats->core_errors, outfile);
}

/*
 * Print all requested statistics sections for one core, gated by the
 * STATS_FILTER_* bits in stats_filters. Starts a new output record.
 */
void cache_stats_core_counters(const struct kcas_core_info *info,
        uint32_t cache_size, uint32_t cache_occupancy,
        unsigned int stats_filters, FILE *outfile,
        ocf_cache_line_size_t cache_line_size)
{
    const struct ocf_stats_core *stats = &info->stats;

    begin_record(outfile);

    if (stats_filters & STATS_FILTER_CONF) {
        print_core_conf(info, cache_size, outfile, cache_line_size);
    }

    if (stats_filters & STATS_FILTER_USAGE) {
        print_core_usage(stats, cache_size, cache_occupancy,
                outfile, cache_line_size);
    }

    if (stats_filters & STATS_FILTER_REQ) {
        print_req_stats(stats, outfile);
    }

    if (stats_filters & STATS_FILTER_BLK) {
        print_block_stats(stats, outfile,
                cache_line_size);
    }

    if (stats_filters & STATS_FILTER_ERR) {
        print_error_stats(stats, outfile);
    }
}

/*
 * Print the configuration section for one IO class: id, name,
 * eviction priority (-1 means "Pinned") and whether allocation is
 * enabled (any cache mode other than pass-through).
 */
static void print_stats_ioclass_conf(const struct kcas_io_class* io_class,
        FILE* outfile)
{
    print_kv_pair(outfile, "IO class ID", "%d", io_class->class_id);
    print_kv_pair(outfile, "IO class name", "%s", io_class->info.name);
    if (-1 == io_class->info.priority) {
        print_kv_pair(outfile, "Eviction priority", "Pinned");
    } else {
        print_kv_pair(outfile, "Eviction priority", "%d",
                io_class->info.priority);
    }
    print_kv_pair(outfile, "Selective allocation", "%s",
            io_class->info.cache_mode != ocf_cache_mode_pt ?
            "Yes" : "No");
}

/*
 * Print the usage table for one IO class. Occupancy is relative to
 * the whole cache; clean/dirty to this class's occupancy. The
 * denominators argument is currently unused here.
 */
static void print_stats_ioclass_usage(uint32_t part_id,
        const struct ocf_stats_io_class* part_stats,
        const struct ocf_stats_io_class* denominators,
        FILE *outfile, uint32_t cache_size,
        ocf_cache_line_size_t cache_line_size)
{
    float percent;
    uint64_t clean;

    print_table_header(outfile, 4, "Usage statistics", "Count", "%", "[Units]");

    percent = percentage(part_stats->occupancy_clines, cache_size);
    print_val_perc_table_section(outfile, "Occupancy", UNIT_BLOCKS, percent,
            "%ld",
            cache_line_in_4k(part_stats->occupancy_clines,
                    cache_line_size));

    /* Occupancy, dirty, etc. information. */
    /* For now free stat should be printed for the unclassified IO class.
     */
    if (IOCLASS_UNCLASSIFIED == part_id) {
        print_val_perc_table_row(outfile, "Free", UNIT_BLOCKS,
                100.0f, "%ld",
                cache_line_in_4k(part_stats->free_clines,
                        cache_line_size));
    } else {
        /* other classes always report 0 free blocks */
        print_val_perc_table_row(outfile, "Free", UNIT_BLOCKS,
                0.0f, "%d", 0);
    }

    clean = part_stats->occupancy_clines - part_stats->dirty_clines;
    percent = percentage(clean, part_stats->occupancy_clines);
    print_val_perc_table_row(outfile, "Clean", UNIT_BLOCKS, percent,
            "%ld",
            cache_line_in_4k(clean, cache_line_size));

    percent = percentage(part_stats->dirty_clines, part_stats->occupancy_clines);
    print_val_perc_table_row(outfile, "Dirty", UNIT_BLOCKS, percent,
            "%ld",
            cache_line_in_4k(part_stats->dirty_clines,
                    cache_line_size));
}

/*
 * Print the request table for one IO class. All percentages are
 * relative to req_grand_total (all requests of all classes, both
 * directions, including pass-through). denominators is unused here.
 */
static void print_stats_ioclass_req(const struct ocf_stats_io_class* part_stats,
        const struct ocf_stats_io_class* denominators,
        FILE *outfile, uint64_t req_grand_total)
{
    const struct ocf_stats_req *req_stats;
    float percent;
    uint64_t hits;
    uint64_t serv_reqs = 0;
    uint64_t total_reqs = 0;

    print_table_header(outfile, 4, "Request statistics", "Count",
            "%", "[Units]");

    /* Handling read operations. */
    req_stats = &part_stats->read_reqs;

    hits = req_stats->total - (req_stats->partial_miss + req_stats->full_miss);
    percent = percentage(hits, req_grand_total);
    print_val_perc_table_section(outfile, "Read hits", UNIT_REQUESTS, percent,
            "%ld", hits);

    percent = percentage(req_stats->partial_miss, req_grand_total);
    print_val_perc_table_row(outfile, "Read partial misses", UNIT_REQUESTS,
            percent, "%ld", req_stats->partial_miss);

    percent = percentage(req_stats->full_miss, req_grand_total);
    print_val_perc_table_row(outfile, "Read full misses", UNIT_REQUESTS,
            percent, "%ld", req_stats->full_miss);

    percent = percentage(req_stats->total, req_grand_total);
    print_val_perc_table_row(outfile, "Read total", UNIT_REQUESTS,
            percent, "%ld", req_stats->total);

    /* Handling write operations.
     */
    req_stats = &part_stats->write_reqs;

    hits = req_stats->total - (req_stats->partial_miss + req_stats->full_miss);
    percent = percentage(hits, req_grand_total);
    print_val_perc_table_section(outfile, "Write hits", UNIT_REQUESTS, percent,
            "%ld", hits);

    percent = percentage(req_stats->partial_miss, req_grand_total);
    print_val_perc_table_row(outfile, "Write partial misses", UNIT_REQUESTS,
            percent, "%ld", req_stats->partial_miss);

    percent = percentage(req_stats->full_miss, req_grand_total);
    print_val_perc_table_row(outfile, "Write full misses", UNIT_REQUESTS,
            percent, "%ld", req_stats->full_miss);

    percent = percentage(req_stats->total, req_grand_total);
    print_val_perc_table_row(outfile, "Write total", UNIT_REQUESTS,
            percent, "%ld", req_stats->total);

    /* Pass-Through requests. */
    percent = percentage(part_stats->read_reqs.pass_through, req_grand_total);
    print_val_perc_table_section(outfile, "Pass-Through reads", UNIT_REQUESTS,
            percent, "%lu",
            part_stats->read_reqs.pass_through);

    percent = percentage(part_stats->write_reqs.pass_through, req_grand_total);
    print_val_perc_table_row(outfile, "Pass-Through writes", UNIT_REQUESTS,
            percent, "%lu",
            part_stats->write_reqs.pass_through);

    /* Summary.
     */
    serv_reqs += part_stats->read_reqs.total;
    serv_reqs += part_stats->write_reqs.total;
    total_reqs = serv_reqs + part_stats->read_reqs.pass_through +
            part_stats->write_reqs.pass_through;

    percent = percentage(serv_reqs, req_grand_total);
    print_val_perc_table_row(outfile, "Serviced requests", UNIT_REQUESTS,
            percent, "%lu", serv_reqs);

    percent = percentage(total_reqs, req_grand_total);
    print_val_perc_table_section(outfile, "Total requests", UNIT_REQUESTS,
            percent, "%lu", total_reqs);

}

/*
 * Print the block table for one IO class: read and write traffic in
 * 4KiB blocks, as a share of the all-classes denominators.
 */
static void print_stats_ioclass_blk(const struct ocf_stats_io_class* part_stats,
        const struct ocf_stats_io_class* denominators, FILE *outfile,
        ocf_cache_line_size_t cache_line_size)
{
    float percent;

    print_table_header(outfile, 4, "Block statistics", "Count", "%",
            "[Units]");

    /* Handling read operations. */
    percent = percentage(part_stats->blocks.read, denominators->blocks.read);
    print_val_perc_table_section(outfile, "Blocks reads", UNIT_BLOCKS,
            percent, "%ld",
            bytes_to_4k(part_stats->blocks.read));

    /* Handling write operations.
     */
    percent = percentage(part_stats->blocks.write, denominators->blocks.write);
    print_val_perc_table_section(outfile, "Blocks writes", UNIT_BLOCKS,
            percent, "%ld",
            bytes_to_4k(part_stats->blocks.write));
}

/**
 * print statistics regarding single io class (partition)
 *
 * Emits one output record with the sections selected by
 * stats_filters (conf / usage / req / blk). denominators and
 * req_grand_total provide the all-classes totals used as
 * percentage denominators.
 */
void print_stats_ioclass(const struct kcas_cache_info *cache_info,
        const struct kcas_io_class *io_class,
        FILE *outfile, unsigned int stats_filters,
        struct ocf_stats_io_class *denominators, uint64_t req_grand_total,
        ocf_cache_line_size_t cache_line_size)
{
    const struct ocf_stats_io_class *part_stats;
    uint32_t part_id;

    part_id = io_class->class_id;
    part_stats = &io_class->stats;

    begin_record(outfile);

    if (stats_filters & STATS_FILTER_CONF) {
        print_stats_ioclass_conf(io_class, outfile);
    }

    if (stats_filters & STATS_FILTER_USAGE) {
        print_stats_ioclass_usage(part_id, part_stats, denominators,
                outfile, cache_info->info.size,
                cache_line_size);
    }

    if (stats_filters & STATS_FILTER_REQ) {
        print_stats_ioclass_req(part_stats, denominators, outfile, req_grand_total);
    }

    if (stats_filters & STATS_FILTER_BLK) {
        print_stats_ioclass_blk(part_stats, denominators, outfile,
                cache_line_size);
    }
}

/*
 * Fetch statistics for one IO class via KCAS_IOCTL_PARTITION_STATS.
 * io_class_tmp is scratch space for the ioctl; the fields the caller
 * needs (err code, name, id, priority, cache mode) are copied into
 * io_class_out. Stats accumulation across cores is done by the
 * caller from io_class_tmp. Returns SUCCESS/FAILURE; on failure only
 * ext_err_code in io_class_out is valid.
 */
static int read_io_class_stats(int ctrl_fd, int cache_id, int core_id,
        int part_id,
        struct kcas_io_class *io_class_tmp,
        struct kcas_io_class *io_class_out)
{
    memset(io_class_tmp, 0, sizeof(*io_class_tmp));

    io_class_tmp->cache_id = cache_id;
    io_class_tmp->class_id = part_id;
    if (core_id != OCF_CORE_ID_INVALID) {
        /* per-core query: ask the kernel for this core's stats */
        io_class_tmp->core_id = core_id;
        io_class_tmp->get_stats = 1;
    }

    if (ioctl(ctrl_fd, KCAS_IOCTL_PARTITION_STATS, io_class_tmp) < 0) {
        io_class_out->ext_err_code = io_class_tmp->ext_err_code;
        return FAILURE;
    }

    io_class_out->ext_err_code = io_class_tmp->ext_err_code;
    strncpy_s(io_class_out->info.name, sizeof(io_class_out->info.name),
            io_class_tmp->info.name, sizeof(io_class_tmp->info.name) - 1);

    io_class_out->class_id = io_class_tmp->class_id;
    io_class_out->info.priority = io_class_tmp->info.priority;
    io_class_out->info.cache_mode = io_class_tmp->info.cache_mode;

    return SUCCESS;

}

/* Accumulate block counters (read/write) from one stats struct into another. */
static inline void accum_block_stats(struct ocf_stats_block *to, const struct ocf_stats_block *from)
{
    to->read += from->read;
    to->write += from->write;
}

/* Accumulate request counters (misses/total/pass-through) into an aggregate. */
static inline void accum_req_stats(struct ocf_stats_req *to, const struct ocf_stats_req *from)
{
    to->full_miss += from->full_miss;
    to->partial_miss += from->partial_miss;
    to->total += from->total;
    to->pass_through += from->pass_through;
}

/**
 * @brief print per-io-class statistics for all configured io classes
 *
 * Collects stats for every IO class (summed over all cores, or only
 * the selected core when core_id != OCF_CORE_ID_INVALID), builds the
 * all-classes denominators and grand request total, then prints
 * either every existing class (io_class_id == -1) or the selected
 * one. Returns SUCCESS/FAILURE.
 */
int cache_stats_ioclasses(int ctrl_fd, const struct kcas_cache_info *cache_info,
        unsigned int cache_id, unsigned int core_id,
        int io_class_id, FILE *outfile,
        unsigned int stats_filters)
{
    int i, j, _core_id;
    struct ocf_stats_io_class denominators;
    struct ocf_stats_io_class* part_stats_cum;
    struct ocf_stats_io_class* part_stats_core;
    struct kcas_io_class io_class_new[OCF_IO_CLASS_MAX] = {};
    struct kcas_io_class io_class_tmp;
    uint64_t req_grand_total = 0;
    memset(&denominators, 0, sizeof(denominators));

    if (-1 != io_class_id && io_class_id >= OCF_IO_CLASS_MAX) {
        cas_printf(LOG_ERR, "Partition %d does not exists\n", io_class_id);
        return FAILURE;
    }

    for (i = 0; i < OCF_IO_CLASS_MAX; ++i) {
        /* print stats for each ioclass */

        if (!cache_info->info.core_count) {
            /* no cores attached: query the class without per-core stats */
            if (read_io_class_stats(ctrl_fd, cache_id, 0, i,
                    &io_class_tmp,
                    &io_class_new[i])) {
                if (io_class_new[i].ext_err_code ==
                        OCF_ERR_IO_CLASS_NOT_EXIST) {
                    continue;
                }

                cas_printf(LOG_ERR,
                        "Error while retrieving stats for partition %d\n",
                        i);
                print_err(io_class_new[i].ext_err_code);
                goto cleanup;
            }
        } else {
            /* sum this class's stats over all (or the selected) cores */
            for (j = 0; j < cache_info->info.core_count; ++j) {

                _core_id = cache_info->core_id[j];
                if (core_id != OCF_CORE_ID_INVALID && core_id != _core_id)
                {
                    continue;
                }

                if (read_io_class_stats(ctrl_fd, cache_id,
                        _core_id, i,
                        &io_class_tmp,
                        &io_class_new[i])) {
                    if (io_class_new[i].ext_err_code ==
                            OCF_ERR_IO_CLASS_NOT_EXIST) {
                        continue;
                    }

                    /* NOTE(review): prints the caller's core_id filter
                     * (possibly OCF_CORE_ID_INVALID), not the iterated
                     * _core_id actually being queried — confirm intent. */
                    cas_printf(LOG_ERR,
                            "Error while retrieving stats for partition %d, core %d\n",
                            i, core_id);
                    print_err(io_class_new[i].ext_err_code);
                    goto cleanup;
                }

                part_stats_cum = &io_class_new[i].stats;
                part_stats_core = &io_class_tmp.stats;

                /* free space is cache-global, so assign rather than
                 * accumulate across cores */
                part_stats_cum->free_clines =
                        part_stats_core->free_clines;

                part_stats_cum->occupancy_clines +=
                        part_stats_core->occupancy_clines;
                part_stats_cum->dirty_clines +=
                        part_stats_core->dirty_clines;

                accum_block_stats(&part_stats_cum->blocks,
                        &part_stats_core->blocks);
                accum_req_stats(&part_stats_cum->read_reqs,
                        &part_stats_core->read_reqs);
                accum_req_stats(&part_stats_cum->write_reqs,
                        &part_stats_core->write_reqs);
            }
        }
    }

    /* second pass: build all-classes denominators for percentages */
    for (i = 0; i < OCF_IO_CLASS_MAX; ++i) {
        if (io_class_new[i].ext_err_code == OCF_ERR_IO_CLASS_NOT_EXIST) {
            continue;
        }
        const struct ocf_stats_io_class *ps = &io_class_new[i].stats;

        denominators.occupancy_clines += ps->occupancy_clines;
        denominators.dirty_clines += ps->dirty_clines;

        accum_block_stats(&denominators.blocks, &ps->blocks);

        accum_req_stats(&denominators.read_reqs, &ps->read_reqs);
        accum_req_stats(&denominators.write_reqs, &ps->write_reqs);
    }
    req_grand_total += denominators.read_reqs.total;
    req_grand_total += denominators.read_reqs.pass_through;
    req_grand_total += denominators.write_reqs.total;
    req_grand_total += denominators.write_reqs.pass_through;

    if (-1 == io_class_id) {
        /* print every existing class */
        for (i = 0; i < OCF_IO_CLASS_MAX; ++i) {
            if (io_class_new[i].ext_err_code == OCF_ERR_IO_CLASS_NOT_EXIST) {
                continue;
            }
            print_stats_ioclass(cache_info, &io_class_new[i],
                    outfile, stats_filters, &denominators, req_grand_total,
                    cache_info->info.cache_line_size / KiB);
        }
    } else {
        if (io_class_new[io_class_id].ext_err_code ==
OCF_ERR_IO_CLASS_NOT_EXIST) { + cas_printf(LOG_ERR, "Partition %d does not exists\n", io_class_id); + return FAILURE; + } + print_stats_ioclass(cache_info, &io_class_new[io_class_id], + outfile, stats_filters, &denominators, req_grand_total, + cache_info->info.cache_line_size / KiB); + } + + return SUCCESS; + +cleanup: + close(ctrl_fd); + + if (outfile != stdout) { + fclose(outfile); + } + return FAILURE; +} + +static inline void accum_error_stats(struct ocf_stats_error *to, + const struct ocf_stats_error *from) +{ + to->read += from->read; + to->write += from->write; +} + +int cache_stats_cores(int ctrl_fd, const struct kcas_cache_info *cache_info, + unsigned int cache_id, unsigned int core_id, int io_class_id, + FILE *outfile, unsigned int stats_filters) +{ + int i; + int _core_id; + uint32_t cache_size; + ocf_cache_line_size_t cache_line_size; + struct kcas_core_info core_info; + + for (i = 0; i < cache_info->info.core_count; ++i) { + /* if user only requested stats pertaining to a specific core, + skip all other cores */ + _core_id = cache_info->core_id[i]; + if ((core_id != OCF_CORE_ID_INVALID) && (core_id != _core_id)) { + continue; + } + /* call function to print stats */ + if (get_core_info(ctrl_fd, cache_id, _core_id, &core_info)) { + cas_printf(LOG_ERR, "Error while retrieving stats for core %d\n", _core_id); + print_err(core_info.ext_err_code); + return FAILURE; + } + + cache_size = cache_info->info.size; + cache_line_size = cache_info->info.cache_line_size / KiB; + + cache_stats_core_counters(&core_info, cache_size, + cache_info->info.occupancy, + stats_filters, outfile, cache_line_size); + } + + return SUCCESS; +} + +int cache_stats_conf(int ctrl_fd, const struct kcas_cache_info *cache_info, + unsigned int cache_id, FILE *outfile, + unsigned int stats_filters) +{ + float flush_progress = 0; + float value; + const char *units; + long unsigned int cache_size; + const char *cache_path; + char dev_path[MAX_STR_LEN]; + int inactive_cores; + + if 
(get_dev_path(cache_info->cache_path_name, dev_path, sizeof(dev_path)) != SUCCESS) + cache_path = cache_info->cache_path_name; + else + cache_path = dev_path; + + flush_progress = calculate_flush_progress(cache_info->info.dirty, + cache_info->info.flushed); + + print_kv_pair(outfile, "Cache Id", "%d", + cache_info->cache_id); + + cache_size = cache_line_in_4k(cache_info->info.size, + cache_info->info.cache_line_size / KiB); + + print_kv_pair(outfile, "Cache Size", "%lu, [4KiB Blocks], %.2f, [GiB]", + cache_size, + (float) cache_size * (4 * KiB) / GiB); + + print_kv_pair(outfile, "Cache Device", "%s", + cache_path); + print_kv_pair(outfile, "Core Devices", "%d", + cache_info->info.core_count); + inactive_cores = get_inactive_core_count(cache_info); + if (inactive_cores < 0) + return FAILURE; + print_kv_pair(outfile, "Inactive Core Devices", "%d", inactive_cores); + + print_kv_pair(outfile, "Write Policy", "%s%s", + (flush_progress && cache_info->info.cache_mode != ocf_cache_mode_wb) + ? "wb->" : "", cache_mode_to_name(cache_info->info.cache_mode)); + print_kv_pair(outfile, "Eviction Policy", "%s", + eviction_policy_to_name(cache_info->info.eviction_policy)); + print_kv_pair(outfile, "Cleaning Policy", "%s", + cleaning_policy_to_name(cache_info->info.cleaning_policy)); + print_kv_pair(outfile, "Cache line size", "%llu, [KiB]", + cache_info->info.cache_line_size / KiB); + + metadata_memory_footprint(cache_info->info.metadata_footprint, + &value, &units); + print_kv_pair(outfile, "Metadata Memory Footprint", "%.1f, [%s]", + value, units); + + print_kv_pair_time(outfile, "Dirty for", cache_info->info.dirty_for); + + print_kv_pair(outfile, "Metadata Mode", "%s", + metadata_mode_to_name(cache_info->metadata_mode)); + + if (flush_progress) { + print_kv_pair(outfile, "Status", "%s (%3.1f %%)", + "Flushing", flush_progress); + } else { + print_kv_pair(outfile, "Status", "%s", + get_cache_state_name(cache_info->info.state)); + } + + return SUCCESS; +} + +int 
cache_stats_usage(int ctrl_fd, const struct kcas_cache_info *cache_info, + unsigned int cache_id, FILE* outfile) +{ + print_usage_header(outfile); + + print_val_perc_table_row(outfile, "Occupancy", UNIT_BLOCKS, + percentage(cache_info->info.occupancy, + cache_info->info.size), + "%lu", + cache_line_in_4k(cache_info->info.occupancy, + cache_info->info.cache_line_size / KiB)); + + print_val_perc_table_row(outfile, "Free", UNIT_BLOCKS, + percentage(cache_info->info.size - + cache_info->info.occupancy, + cache_info->info.size), + "%lu", + cache_line_in_4k(cache_info->info.size - + cache_info->info.occupancy, + cache_info->info.cache_line_size / KiB)); + + print_val_perc_table_row(outfile, "Clean", UNIT_BLOCKS, + percentage(cache_info->info.occupancy - + cache_info->info.dirty, + cache_info->info.occupancy), + "%lu", + cache_line_in_4k(cache_info->info.occupancy - + cache_info->info.dirty, + cache_info->info.cache_line_size / KiB)); + + print_val_perc_table_row(outfile, "Dirty", UNIT_BLOCKS, + percentage(cache_info->info.dirty, + cache_info->info.occupancy), + "%lu", + cache_line_in_4k(cache_info->info.dirty, + cache_info->info.cache_line_size / KiB)); + + return SUCCESS; +} + +int cache_stats_inactive_usage(int ctrl_fd, const struct kcas_cache_info *cache_info, + unsigned int cache_id, FILE* outfile) +{ + print_table_header(outfile, 4, "Inactive usage statistics", "Count", + "%", "[Units]"); + + print_val_perc_table_row(outfile, "Inactive Occupancy", UNIT_BLOCKS, + percentage(cache_info->info.inactive.occupancy, + cache_info->info.size), + "%lu", + cache_line_in_4k(cache_info->info.inactive.occupancy, + cache_info->info.cache_line_size / KiB)); + + print_val_perc_table_row(outfile, "Inactive Clean", UNIT_BLOCKS, + percentage(cache_info->info.inactive.occupancy - + cache_info->info.inactive.dirty, + cache_info->info.occupancy), + "%lu", + cache_line_in_4k(cache_info->info.inactive.occupancy - + cache_info->info.inactive.dirty, + cache_info->info.cache_line_size / KiB)); 
+ + print_val_perc_table_row(outfile, "Inactive Dirty", UNIT_BLOCKS, + percentage(cache_info->info.inactive.dirty, + cache_info->info.occupancy), + "%lu", + cache_line_in_4k(cache_info->info.inactive.dirty, + cache_info->info.cache_line_size / KiB)); + + return SUCCESS; +} + +int cache_stats_counters(int ctrl_fd, const struct kcas_cache_info *cache_info, + unsigned int cache_id, FILE *outfile, + unsigned int stats_filters) +{ + int i; + int _core_id; + struct ocf_stats_core *stats; + struct ocf_stats_core total_stats; + struct kcas_core_info core_info; + + struct ocf_stats_error total_cache_errors, total_core_errors; + + memset(&total_stats, 0, sizeof(total_stats)); + + memset(&total_cache_errors, 0, sizeof(total_cache_errors)); + memset(&total_core_errors, 0, sizeof(total_core_errors)); + + for (i = 0; i < cache_info->info.core_count; ++i) { + /* if user only requested stats pertaining to a specific core, + skip all other cores */ + _core_id = cache_info->core_id[i]; + /* call function to print stats */ + if (get_core_info(ctrl_fd, cache_id, _core_id, &core_info)) { + cas_printf(LOG_ERR, "Error while retrieving stats for core %d\n", _core_id); + print_err(core_info.ext_err_code); + return FAILURE; + } + + stats = &core_info.stats; + + /* Convert block stats to 4k before adding them up. 
This way + sum of block stats for cores is consistent with cache + stats */ + stats->cache_volume = convert_block_stats_to_4k(&stats->cache_volume); + stats->core_volume = convert_block_stats_to_4k(&stats->core_volume); + stats->core = convert_block_stats_to_4k(&stats->core); + + accum_block_stats(&total_stats.cache_volume, &stats->cache_volume); + accum_block_stats(&total_stats.core_volume, &stats->core_volume); + accum_block_stats(&total_stats.core, &stats->core); + + accum_req_stats(&total_stats.read_reqs, &stats->read_reqs); + accum_req_stats(&total_stats.write_reqs, &stats->write_reqs); + + accum_error_stats(&total_cache_errors, &stats->cache_errors); + accum_error_stats(&total_core_errors, &stats->core_errors); + } + + /* Totals for requests stats. */ + if (stats_filters & STATS_FILTER_REQ) { + print_req_stats(&total_stats, outfile); + } + + /* Totals for blocks stats. */ + if (stats_filters & STATS_FILTER_BLK) { + print_table_header(outfile, 4, "Block statistics", "Count", + "%", "[Units]"); + print_block_section(&total_stats.core_volume, "core(s)", outfile, + cache_info->info.cache_line_size / KiB); + print_block_section(&total_stats.cache_volume, "cache", outfile, + cache_info->info.cache_line_size / KiB); + print_block_section(&total_stats.core, "exported object(s)", outfile, + cache_info->info.cache_line_size / KiB); + } + + /* Totals for error stats. 
*/ + if (stats_filters & STATS_FILTER_ERR) { + print_table_header(outfile, 4, "Error statistics", "Count", "%", + "[Units]"); + print_error_section(&total_cache_errors, "Cache", outfile); + print_error_section(&total_core_errors, "Core", outfile); + + print_error_stats_total(&total_cache_errors, &total_core_errors, + outfile); + } + + return SUCCESS; +} + + +struct stats_printout_ctx +{ + FILE *intermediate; + FILE *out; + int type; + int result; +}; +void *stats_printout(void *ctx) +{ + struct stats_printout_ctx *spc = ctx; + if (stat_format_output(spc->intermediate, + spc->out, spc->type)) { + cas_printf(LOG_ERR, "An error occured during statistics formatting.\n"); + spc->result = FAILURE; + } else { + spc->result = SUCCESS; + } + + return 0; +} + +bool _usage_stats_is_valid(struct kcas_cache_info *cmd_info) +{ + return (cmd_info->info.size >= cmd_info->info.occupancy); +} +/** + * @brief print cache statistics in various variants + * + * this routine implements -P (--stats) subcommand of casadm. + * @param cache_id id of a cache, to which stats query pertains + * @param stats_filters subset of statistics to be displayed. If filters are not + * specified STATS_FILTER_DEFAULT are displayd. + * @param fpath path to an output CSV file to which statistics shall be printed. single "-" + * can be passed as a path, to generate CSV to stdout. Henceforth non-NULL value of + * fpath is a sign that stats shall be printed in CSV-format, and NULL value will] + * cause stats to be printed in pretty tables. + * + * @return SUCCESS upon successful printing of statistic. 
FAILURE if any error happens + */ +int cache_status(unsigned int cache_id, unsigned int core_id, int io_class_id, + unsigned int stats_filters, unsigned int output_format) +{ + int ctrl_fd, i; + int ret = SUCCESS; + int attempt_no = 0; + struct kcas_cache_info cache_info; + + ctrl_fd = open_ctrl_device(); + + if (ctrl_fd < 0) { + print_err(KCAS_ERR_SYSTEM); + return FAILURE; + } + + /** + * + * Procedure of printing out statistics is as follows: + * + * + * statistics_model.c (retrieve structures from kernel, don't do formatting) + * | + * v + * abstract CSV notation with prefixes (as a temporary file) + * | + * v + * statistics_view (parse basic csv notation, generate proper output) + * | + * v + * desired output format + * + */ + + /* 1 is writing end, 0 is reading end of a pipe */ + FILE *intermediate_file[2]; + + if (create_pipe_pair(intermediate_file)) { + cas_printf(LOG_ERR,"Failed to create unidirectional pipe.\n"); + return FAILURE; + } + + /* Select file to which statistics shall be printed and + * + */ + FILE *outfile; + + outfile = stdout; + + /** + * printing in statistics will be performed in separate + * thread, so that we can interleave statistics collecting + * and formatting tables + */ + struct stats_printout_ctx printout_ctx; + printout_ctx.intermediate = intermediate_file[0]; + printout_ctx.out = outfile; + printout_ctx.type = (OUTPUT_FORMAT_CSV == output_format ? 
CSV : TEXT); + pthread_t thread; + pthread_create(&thread, 0, stats_printout, &printout_ctx); + + memset(&cache_info, 0, sizeof(cache_info)); + + cache_info.cache_id = cache_id; + + do { + if (0 != attempt_no) { + usleep(300 * 1000); + } + + if (ioctl(ctrl_fd, KCAS_IOCTL_CACHE_INFO, &cache_info) < 0) { + cas_printf(LOG_ERR, "Cache Id %d not running\n", cache_id); + ret = FAILURE; + goto cleanup; + } + + /* Check if core exists in cache */ + if (core_id != OCF_CORE_ID_INVALID) { + for (i = 0; i < cache_info.info.core_count; ++i) { + if (core_id == cache_info.core_id[i]) { + break; + } + } + if (i == cache_info.info.core_count) { + cas_printf(LOG_ERR, "No such core device in cache.\n"); + ret = FAILURE; + goto cleanup; + } + } + + attempt_no++; + } while (false == _usage_stats_is_valid(&cache_info) && + (attempt_no < ALLOWED_NUMBER_OF_ATTEMPTS)); + + if (stats_filters & STATS_FILTER_IOCLASS) { + if (cache_stats_ioclasses(ctrl_fd, &cache_info, cache_id, + core_id, io_class_id, + intermediate_file[1], + stats_filters)) { + return FAILURE; + } + } else if (core_id == OCF_CORE_ID_INVALID) { + + begin_record(intermediate_file[1]); + if (stats_filters & STATS_FILTER_CONF) { + if (cache_stats_conf(ctrl_fd, &cache_info, + cache_id, + intermediate_file[1], + stats_filters)) { + ret = FAILURE; + goto cleanup; + } + } + + if (stats_filters & STATS_FILTER_USAGE) { + if (cache_stats_usage(ctrl_fd, &cache_info, + cache_id, + intermediate_file[1])) { + ret = FAILURE; + goto cleanup; + } + } + if ((cache_info.info.state & (1 << ocf_cache_state_incomplete)) + && stats_filters & STATS_FILTER_USAGE) { + if (cache_stats_inactive_usage(ctrl_fd, &cache_info, + cache_id, + intermediate_file[1])) { + ret = FAILURE; + goto cleanup; + } + } + + if (stats_filters & STATS_FILTER_COUNTERS) { + if (cache_stats_counters(ctrl_fd, &cache_info, + cache_id, + intermediate_file[1], + stats_filters)) { + ret = FAILURE; + goto cleanup; + } + } + + } else { + /* print per core statistics. 
this may include: + * - core header + * - core counters + * - core per io class statistics + * + * depending on which set of statistics is enabled via -f/-d switches. + */ + if (cache_stats_cores(ctrl_fd, &cache_info, cache_id, + core_id, io_class_id, + intermediate_file[1], stats_filters)) { + + ret = FAILURE; + goto cleanup; + } + } + +cleanup: + close(ctrl_fd); + fclose(intermediate_file[1]); + pthread_join(thread, 0); + if (printout_ctx.result) { + ret = 1; + } + + fclose(intermediate_file[0]); + + if (outfile != stdout) { + fclose(outfile); + } + return ret; +} diff --git a/casadm/statistics_view.c b/casadm/statistics_view.c new file mode 100644 index 000000000..3c74eb524 --- /dev/null +++ b/casadm/statistics_view.c @@ -0,0 +1,117 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include +#include +#include "cas_lib.h" +#include "csvparse.h" +#include "string.h" +#include "statistics_view.h" +#include "statistics_view_structs.h" +#include "statistics_view_text.h" +#include "statistics_view_csv.h" +#include "statistics_view_raw_csv.h" + +static struct view_t *construct_view(int format, FILE *outfile) +{ + struct view_t *out = calloc(1, sizeof(*out)); + if (!out) { + return NULL; + } + + switch (format) { + case CSV: + out->process_row = csv_process_row; + out->end_input = csv_end_input; + out->construct = csv_construct; + out->destruct = csv_destruct; + break; + case RAW_CSV: + out->process_row = raw_csv_process_row; + out->end_input = raw_csv_end_input; + out->construct = raw_csv_construct; + out->destruct = raw_csv_destruct; + break; + case TEXT: + out->process_row = text_process_row; + out->end_input = text_end_input; + out->construct = text_construct; + out->destruct = text_destruct; + break; + } + out->outfile = outfile; + out->construct(out); + return out; +}; + +void destruct_view(struct view_t* v) +{ + v->destruct(v); + free(v); +} + +#define RECOGNIZE_TYPE(t) if (!strcmp(cols[0], TAG_NAME(t))) 
{type = t;} + +int stat_print_intermediate(FILE *infile, FILE *outfile) +{ + char buf[MAX_STR_LEN] = { 0 }; + while (fgets(buf, MAX_STR_LEN, infile)) { + fprintf(outfile, "%s", buf); + } + + return 0; +} +int stat_format_output(FILE *infile, FILE *outfile, int format) +{ + int result = 0; + if (format == PLAIN) { + return stat_print_intermediate(infile, outfile); + } + struct view_t *view = construct_view(format, outfile); + if (!view) { + cas_printf(LOG_ERR, "Failed to allocate memory for output generator\n"); + return 1; + } + CSVFILE *cf = csv_fopen(infile); + if (!cf) { + cas_printf(LOG_ERR, "Failed to allocate memory for CSV parser\n"); + destruct_view(view); + return 1; + } + + while (!csv_read(cf)) { + int num_cols = csv_count_cols(cf); + char **cols = csv_get_col_ptr(cf); + int type = UNDEFINED_TAG; + if (num_cols<1) { + continue; + } + RECOGNIZE_TYPE(FREEFORM); + RECOGNIZE_TYPE(KV_PAIR); + RECOGNIZE_TYPE(TABLE_ROW); + RECOGNIZE_TYPE(TABLE_HEADER); + RECOGNIZE_TYPE(TABLE_SECTION); + RECOGNIZE_TYPE(TREE_HEADER); + RECOGNIZE_TYPE(TREE_BRANCH); + RECOGNIZE_TYPE(TREE_LEAF); + RECOGNIZE_TYPE(RECORD); + RECOGNIZE_TYPE(DATA_SET); + if (type == UNDEFINED_TAG) { + cas_printf(LOG_ERR, "Unrecognized tag: %s\n", cols[0]); + result = 1; + break; + } + if (view->process_row(view, type, num_cols-1, cols+1)) { + cas_printf(LOG_ERR, "Failed to process row starting with: %s\n", cols[0]); + result = 1; + break; + } + } + view->end_input(view); + + csv_close_nu(cf); + destruct_view(view); + return result; +} diff --git a/casadm/statistics_view.h b/casadm/statistics_view.h new file mode 100644 index 000000000..e0293d8d4 --- /dev/null +++ b/casadm/statistics_view.h @@ -0,0 +1,79 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __STAT_VIEW +#define __STAT_VIEW + +#include + +/* each line of statistics may be assigned one fo these semantic formats, + * to which it will be converted */ + +enum tag_type { + FREEFORM, 
/**< free form text */ + KV_PAIR, /**< key value pair. sequence of kv-pairs will be aligned to + columns, but no table-styleborders will be drawn */ + TABLE_ROW, /**< regular table row */ + TABLE_HEADER, /**< table header */ + TABLE_SECTION, /**< first row of a table section */ + DATA_SET, /**< set of records */ + RECORD, /**< one record of data */ + TREE_HEADER, + TREE_BRANCH, + TREE_LEAF, + UNDEFINED_TAG /**< occurence of this (or anything else out of + above tags) will immediately break processing */ +}; + +#define TAG(x) #x "," +#define TAG_NAME(x) #x + +enum format { + TEXT, /**< output in text (formatted tables) form */ + CSV, /**< output in csv form */ + RAW_CSV, /**< csv form without transformations */ + PLAIN /** +#include +#include +#include "statistics_view.h" +#include "statistics_view_structs.h" +#include "statistics_view_csv.h" + +#define VALS_BUFFER_INIT_SIZE 10 + +/** + * private data of CSV output formatter + */ +struct csv_out_prv { + int data_set; /* current data set number */ + int record; /* current record number */ + int column; /* current column number */ + char **vals; + char **titles; + int max_vals; + int cur_val; + int max_titles; + int cur_title; +}; + +static inline int csv_is_first_record(struct view_t *this) +{ + return 1 == this->ctx.csv_prv->record; +} + +static inline int csv_is_unit_string(const char *s) +{ + return NULL != s && '[' == s[0]; +} + +static void csv_output_column(struct view_t *this, const char *s) +{ + struct csv_out_prv *prv = this->ctx.csv_prv; + + if (prv->column) { + putc(',', this->outfile); + } + + if (strstr(s, ",")) { + fprintf(this->outfile, "\"%s\"", s); + } else { + fprintf(this->outfile, "%s", s); + } + prv->column++; +} + +static char **csv_check_container(char **container, int *max_vals, + int cur_val) +{ + if (!container) { + *max_vals = VALS_BUFFER_INIT_SIZE; + container = calloc(sizeof(char *), *max_vals); + if (!container) { + return NULL; + } + } + + /* Resize val pointers array if needed */ + if 
(*max_vals < cur_val) { + *max_vals = *max_vals * 2; + if (*max_vals < cur_val) { + *max_vals = cur_val; + } + container = realloc(container, sizeof(char *) * (*max_vals)); + if (!container) { + return NULL; + } + } + + return container; +} + +static int csv_output_data(struct view_t *this, const char *s) +{ + struct csv_out_prv *prv = this->ctx.csv_prv; + if (csv_is_first_record(this)) { + prv->vals = csv_check_container(prv->vals, &prv->max_vals, + prv->cur_val+1); + if (!prv->vals) { + return 1; + } + + /* Store value */ + prv->vals[prv->cur_val] = strdup(s); + if (!prv->vals[prv->cur_val]) { + return 1; + } + prv->cur_val++; + } else { + csv_output_column(this, s); + } + return 0; +} + +static int csv_add_column_subtitle(struct view_t *this, const char *s) +{ + struct csv_out_prv *prv = this->ctx.csv_prv; + + prv->titles = csv_check_container(prv->titles, &prv->max_titles, + prv->cur_title+1); + if (!prv->titles) { + return 1; + } + + /* Store value */ + prv->titles[prv->cur_title] = strdup(s); + if (!prv->titles[prv->cur_title]) { + return 1; + } + prv->cur_title++; + + return 0; +} + +static void csv_output_header(struct view_t *this, const char *title, + const char *unit) +{ + static char buff[64]; + if (unit) { + if (csv_is_unit_string(unit)) { + snprintf(buff, sizeof(buff), "%s %s", title, unit); + } else { + snprintf(buff, sizeof(buff), "%s [%s]", title, unit); + } + csv_output_column(this, buff); + } else { + csv_output_column(this, title); + } +} + +static void csv_finish_record(struct view_t *this) +{ + struct csv_out_prv *prv = this->ctx.csv_prv; + int i; + + if (prv->column) { + putc('\n', this->outfile); + } + + /* + * For first record we need to output stored data values + */ + if (csv_is_first_record(this)) { + prv->column = 0; + for (i = 0; i < prv->cur_val; ++i) { + csv_output_column(this, prv->vals[i]); + } + if (prv->column) { + putc('\n', this->outfile); + } + } + fflush(this->outfile); +} + +static void csv_free_vals(struct view_t *this) +{ 
+ struct csv_out_prv *prv = this->ctx.csv_prv; + int i; + + if (prv->vals) { + for (i = 0; i < prv->cur_val; ++i) { + free(prv->vals[i]); + } + free(prv->vals); + prv->vals = NULL; + prv->cur_val = 0; + prv->max_vals = 0; + } +} + +static void csv_free_titles(struct view_t *this) +{ + struct csv_out_prv *prv = this->ctx.csv_prv; + int i; + + if (prv->titles) { + for (i = 0; i < prv->cur_title; ++i) { + free(prv->titles[i]); + } + free(prv->titles); + prv->titles = NULL; + prv->cur_title = 0; + prv->max_titles = 0; + } +} + +int csv_process_row(struct view_t *this, int type, int num_fields, char *fields[]) +{ + int i; + struct csv_out_prv *prv = this->ctx.csv_prv; + const char *unit = NULL; + + switch (type) { + case DATA_SET: + if (prv->record) { + csv_finish_record(this); + } + csv_free_titles(this); + csv_free_vals(this); + if (prv->data_set) { + putc('\n', this->outfile); + } + if (num_fields > 0) { + fprintf(this->outfile, "%s\n", fields[0]); + } + prv->record = 0; + prv->data_set++; + break; + case RECORD: + if (prv->record) { + csv_finish_record(this); + } + prv->column = 0; + prv->record++; + break; + + /* + * For KV pair assume that values are interleaved + * with units, so output every second value, + * and use units to construct column headers. + * For example: + * KV_PAIR,Cache Size,10347970,[4KiB blocks],39.47,[GiB] + * will result in: + * data row: 10347970,39.47 + * header row: Cache Size [4KiB blocks],Cache Size [GiB] + */ + case KV_PAIR: + for (i = 1; i < num_fields; i += 2) { + if (csv_is_first_record(this)) { + if (i + 1 < num_fields) { + csv_output_header(this, fields[0], + fields[i+1]); + } else { + csv_output_header(this, fields[0], NULL); + } + } + if (csv_output_data(this, fields[i])) { + return 1; + } + } + break; + + /* + * For table rows assume the following format: + * TABLE_{ROW,SECTION},Title,value1,value2,value3,...,unit + * This will result in: + * data row: value1,value2,value3,... 
+ * header row: Title [unit],Title [col1_title],Title [col2_title],... + */ + case TABLE_HEADER: + csv_free_titles(this); + csv_add_column_subtitle(this, ""); + for (i = 2; i < num_fields; i++) { + if (csv_add_column_subtitle(this, fields[i])) { + return 1; + } + } + break; + case TABLE_SECTION: + case TABLE_ROW: + if (csv_is_first_record(this)) { + unit = NULL; + if (csv_is_unit_string(fields[num_fields-1])) { + unit = fields[num_fields-1]; + } + csv_output_header(this, fields[0], unit); + for (i = 2; i < num_fields; i++) { + if (!csv_is_unit_string(prv->titles[i-1])) { + csv_output_header(this, fields[0], + prv->titles[i-1]); + } + } + } + for (i = 1; i < num_fields; i++) { + if (!csv_is_unit_string(prv->titles[i-1])) { + if (csv_output_data(this, fields[i])) { + return 1; + } + } + } + break; + } + return 0; +} + +int csv_end_input(struct view_t *this) +{ + csv_finish_record(this); + return 0; +} +int csv_construct(struct view_t *this) +{ + struct csv_out_prv *prv = calloc(sizeof(struct csv_out_prv), 1); + + if (!prv) { + return 1; + } + this->ctx.csv_prv = prv; + + return 0; +} + +int csv_destruct(struct view_t *this) +{ + csv_free_vals(this); + csv_free_titles(this); + free(this->ctx.csv_prv); + return 0; +} + diff --git a/casadm/statistics_view_csv.h b/casadm/statistics_view_csv.h new file mode 100644 index 000000000..df40d1cff --- /dev/null +++ b/casadm/statistics_view_csv.h @@ -0,0 +1,18 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __STATS_VIEW_CSV +#define __STATS_VIEW_CSV + +int csv_process_row(struct view_t *this, int type, int num_fields, char *fields[]); + +int csv_end_input(struct view_t *this); + +int csv_construct(struct view_t *this); + +int csv_destruct(struct view_t *this); + + +#endif diff --git a/casadm/statistics_view_raw_csv.c b/casadm/statistics_view_raw_csv.c new file mode 100644 index 000000000..40b8ec03e --- /dev/null +++ b/casadm/statistics_view_raw_csv.c @@ -0,0 +1,49 
@@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#define _GNU_SOURCE +#include +#include +#include +#include "statistics_view.h" +#include "statistics_view_structs.h" +#include "statistics_view_raw_csv.h" + +#define VALS_BUFFER_INIT_SIZE 10 + +int raw_csv_process_row(struct view_t *this, int type, int num_fields, char *fields[]) +{ + int i; + if (RECORD != type && DATA_SET != type) { + for (i = 0; i < num_fields; i++) { + if (i) { + fputc(',', this->outfile); + } + if (strstr(fields[i], ",")) { + fprintf(this->outfile, "\"%s\"", fields[i]); + } else { + fprintf(this->outfile, "%s", fields[i]); + } + } + fputc('\n', this->outfile); + + } + return 0; +} + +int raw_csv_end_input(struct view_t *this) +{ + return 0; +} +int raw_csv_construct(struct view_t *this) +{ + return 0; +} + +int raw_csv_destruct(struct view_t *this) +{ + return 0; +} + diff --git a/casadm/statistics_view_raw_csv.h b/casadm/statistics_view_raw_csv.h new file mode 100644 index 000000000..1628953ec --- /dev/null +++ b/casadm/statistics_view_raw_csv.h @@ -0,0 +1,18 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __STATS_VIEW_RAW_CSV +#define __STATS_VIEW_RAW_CSV + +int raw_csv_process_row(struct view_t *this, int type, int num_fields, char *fields[]); + +int raw_csv_end_input(struct view_t *this); + +int raw_csv_construct(struct view_t *this); + +int raw_csv_destruct(struct view_t *this); + + +#endif diff --git a/casadm/statistics_view_structs.h b/casadm/statistics_view_structs.h new file mode 100644 index 000000000..6f7707f7f --- /dev/null +++ b/casadm/statistics_view_structs.h @@ -0,0 +1,29 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __STATS_VIEW_S_H +#define __STATS_VIEW_S_H + +struct csv_out_prv; + +struct text_out_prv; + +struct view_t +{ + FILE *outfile; + union { + struct csv_out_prv *csv_prv; + struct 
text_out_prv *text_prv; + } ctx; + /* type specific init */ + int (*construct)(struct view_t *this); + int (*process_row)(struct view_t *this, int type, int num_fields, char *fields[]); + int (*end_input)(struct view_t *this); + int (*destruct)(struct view_t *this); +}; + + +#endif + diff --git a/casadm/statistics_view_text.c b/casadm/statistics_view_text.c new file mode 100644 index 000000000..07f1fc5cd --- /dev/null +++ b/casadm/statistics_view_text.c @@ -0,0 +1,1025 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include "vt100codes.h" +#include "csvparse.h" +#include "statistics_view.h" +#include "statistics_view_structs.h" +#include "statistics_view_text.h" +#include "table.h" +#include "intvector.h" +#include +#include +#include +#include "safeclib/safe_mem_lib.h" +#include "safeclib/safe_str_lib.h" +#include +#include "cas_lib.h" + +#define NUMBER_COLOR FG_COLOR_YELLOW +#define UNIT_COLOR FG_COLOR_CYAN +#define PATH_COLOR FG_COLOR_MAGENTA +#define TREE_BRANCH_COLOR FG_COLOR_BLUE + +/** + * table drawing character set + */ +struct table_draw_characters { + int outer_horiz; /**< thin horizontal line */ + int outer_right; /**< T facing right border */ + int outer_left; /**< T facing left border */ + int outer_vert; /**< bold vertical line */ + int outer_x; /**< intersection of bold lines */ + + int outer_lt; /**< Left top corner of a frame */ + int outer_lb; /**< Left bottom corner of a frame */ + int outer_rt; /**< Right top corner of a frame */ + int outer_rb; /**< Right bottom corner of a frame */ + + int inner_horiz; /**< thin horizontal line */ + int inner_right; /**< T facing right border */ + int inner_left; /**< T facing right border */ + int inner_top; /**< T facing top border */ + int inner_bottom; /**< T facing bottom border */ + int inner_vert; /**< thin vertical line */ + int inner_x; /**< intersection of thin 
lines */ + + int tree_node; /**< tree node (but not last) */ + int tree_node_last; /**< last tree node */ +}; + +/** + * private data of text output formatter + */ +struct text_out_prv { + struct table *t; /**< currently processed table (freed and reallocated frequently */ + struct intvector col_w; /**< set of column widths */ + struct intvector row_types; /**< set of row types (whenever table rows are headers, + sections or regulars */ + struct table_draw_characters tc; /**< set of table draw characters */ + bool dec_fmt; /**< whenever output shall utilize xterm/DEC VT1xx features */ + /** + * actual number of columns - may be less than number of + * columns in t array, when line breaking is used */ + int num_cols; + /** size of buffer for reconstructed cell */ + int cell_buffer_size; + /** buffer for reconstructed cell */ + char *cell_buffer; + int col_ptr; /**< column pointer for key value printing */ +}; + +int text_construct(struct view_t *this) +{ + struct text_out_prv *prv = calloc(sizeof(struct text_out_prv),1); + struct table_draw_characters *tc = &prv->tc; + this->ctx.text_prv = prv; + if (!prv) { + return 1; + } + prv->t = table_alloc(); + if (!prv->t) { + return 1; + } + if (vector_alloc_placement(&prv->col_w)) { + table_free(prv->t); + return 1; + } + if (vector_alloc_placement(&prv->row_types)) { + table_free(prv->t); + vector_free_placement(&prv->col_w); + return 1; + } + const char *term = getenv("TERM"); + if (term && (!strncmp(term, "xterm", 5) || !strcmp(term, "screen"))) { + prv->dec_fmt = true; + } else { + prv->dec_fmt = false; + } + + /* use "Lang" to detect if utf8 is about to be used. + * additionally use UTF8 frames only for dec_fmt-style terminal (others + * typically lack utf8 fonts */ + const char *lang = getenv("LANG"); + + if (prv->dec_fmt && lang && strcasestr(lang, "UTF-8")) { + /* + * if you want to add more box drawing characters to this mechanism, + * please look up for their UNICODE codes i.e. 
here: + * https://en.wikipedia.org/wiki/Box-drawing_character#Unicode + * + * (don't enter those sequences in code directly as some editors + * badly display them - and vi Emacs are OK, current Eclipse is good + * too, but others cause problem. Also external tools don't like + * them too much) + */ + tc->outer_horiz = 0x2550; + tc->outer_right = 0x2563; /* T facing right border */ + tc->outer_left = 0x2560; + tc->outer_vert = 0x2551; + tc->outer_x = 0x256a; + + tc->outer_lt = 0x2554; + tc->outer_lb = 0x255a; + tc->outer_rt = 0x2557; + tc->outer_rb = 0x255d; + + tc->inner_horiz = 0x2500; + tc->inner_vert = 0x2502; + tc->inner_left = 0x255f; + tc->inner_top = 0x2564; + tc->inner_bottom = 0x2567; + tc->inner_right = 0x2562; + tc->inner_x = 0x253c; + + tc->tree_node = 0x2514; + tc->tree_node_last = 0x251c; + } else { + tc->outer_horiz = '='; + tc->outer_right = '+'; /* T facing right border */ + tc->outer_left = '+'; + tc->outer_vert = '|'; + tc->outer_x = '+'; + + tc->outer_lt = '+'; + tc->outer_lb = '+'; + tc->outer_rt = '+'; + tc->outer_rb = '+'; + + tc->inner_horiz = '-'; + tc->inner_vert = '|'; + tc->inner_right = '+'; /* T facing right border */ + tc->inner_left = '+'; + tc->inner_top = '+'; + tc->inner_bottom = '+'; + tc->inner_x = '+'; + + tc->tree_node = '+'; + tc->tree_node_last = '+'; + } + + if (!isatty(fileno(this->outfile))) { + prv->dec_fmt = false; /* if output is NOT a tty, don't use + * dec_fmt features */ + } + + const char *casadm_colors = getenv("CASADM_COLORS"); + if (casadm_colors && casadm_colors[0]) { + prv->dec_fmt = true; + } + + prv->cell_buffer = 0; + prv->cell_buffer_size = 0; + + return 0; +} + +int text_destruct(struct view_t *this) +{ + struct text_out_prv *prv = this->ctx.text_prv; + table_free(prv->t); + vector_free_placement(&prv->col_w); + vector_free_placement(&prv->row_types); + if (prv->cell_buffer) { + free(prv->cell_buffer); + } + free(prv); + return 0; +} + +/** + * utf8-encoding version of putc + */ +void putcu8(int c, FILE 
*out) +{ + if (c < (1 << 7)) /* 7 bit Unicode encoded as plain ascii */ { + putc(c, out); + return; + } + if (c < (1 << 11)) /* 11 bit Unicode encoded in 2 UTF-8 bytes */ { + putc((c >> 6) | 0xC0, out); + putc((c & 0x3F) | 0x80, out); + return; + } + if (c < (1 << 16)) /* 16 bit Unicode encoded in 3 UTF-8 bytes */ { + putc(((c >> 12)) | 0xE0, out); + putc(((c >> 6) & 0x3F) | 0x80, out); + putc((c & 0x3F) | 0x80, out); + return; + } + if (c < (1 << 21))/* 21 bit Unicode encoded in 4 UTF-8 bytes */ { + putc(((c >> 18)) | 0xF0, out); + putc(((c >> 12) & 0x3F) | 0x80, out); + putc(((c >> 6) & 0x3F) | 0x80, out); + putc((c & 0x3F) | 0x80, out); + return; + } +} + +/** + * Types of table horizontal rule + */ +enum hr_type { + TOP, + AFTER_HEADER, + INTERNAL, + BOTTOM +}; + +/** + * print table horizontal rule. + * @param this output formatter object + * @param mode style of horizontal rule to be printed, + * as per enum hr_type + */ +static int print_table_hr(struct view_t *this, int mode) +{ + struct text_out_prv *prv = this->ctx.text_prv; + struct table_draw_characters *tc = &prv->tc; + int i,j; + int w = prv->num_cols; + + for (j = 0 ; j != w ; ++j) { + if (0 == j) { + if (TOP == mode) { + putcu8(tc->outer_lt, this->outfile); + } else if (AFTER_HEADER == mode) { + putcu8(tc->outer_left, this->outfile); + } else if (INTERNAL == mode) { + putcu8(tc->inner_left, this->outfile); + } else { + putcu8(tc->outer_lb, this->outfile); + } + } else { + if (TOP == mode) { + putcu8(tc->inner_top, this->outfile); + } else if (AFTER_HEADER == mode) { + putcu8(tc->outer_x, this->outfile); + } else if (INTERNAL == mode) { + putcu8(tc->inner_x, this->outfile); + } else { + putcu8(tc->inner_bottom, this->outfile); + } + } + for (i = 0 ; i != vector_get(&prv->col_w, j) + 2; ++i) { + if (INTERNAL == mode) { + putcu8(tc->inner_horiz, this->outfile); + } else { + putcu8(tc->outer_horiz, this->outfile); + } + } + } + + if (TOP == mode) { + putcu8(tc->outer_rt, this->outfile); + } else if 
(AFTER_HEADER == mode) { + putcu8(tc->outer_right, this->outfile); + } else if (INTERNAL == mode) { + putcu8(tc->inner_right, this->outfile); + } else { + putcu8(tc->outer_rb, this->outfile); + } + putc('\n', this->outfile); + return 0; +} + +/** + * configure formatting attribute, if DEC-style formatting is supported by + * output terminal. otherwise don't print anything. + * @param this formatter object + * @param attr formatting attribute to be set (color, bold etc...) + * @return 0 when no error happened. + */ +static int conditional_fmt(struct view_t *this, int attr) { + struct text_out_prv *prv = this->ctx.text_prv; + if (prv->dec_fmt) { + if (fprintf(this->outfile, SET_ATTR, attr)) { + return 0; + } else { + return 1; + } + } else { + return 0; + } +} + +/** + * @brief return true if cell is a decimal number or %. Signed numbers ae NOT + * recognized. + */ +static bool isnumber(const char *str) +{ + int num_dots = 0; + const char *c = str; + do { + if (isdigit(*c)) { + continue; + } else if ('.' == *c) { + if (num_dots++ || c==str) { + return false; /* more than one '.' within + string, or '.' as a first + characterr */ + } + } else if ('%' == *c) { + if (c[1] || c==str || c[-1]=='.' ) { + return false; /* '%' occured and it is not + the last character or '%' + as a first character */ + } + } else { + return false; /* character outside of set [0-9%.] 
*/ + } + + } while (*(++c)); + return true; +} + +static void print_spaces(FILE *outfile, int spaces_no) +{ + int i; + for (i = 0; i < spaces_no; i++) { + fputc(' ', outfile); + } + return; +} + +static int calculate_total_width(struct view_t *this) +{ + struct text_out_prv *prv = this->ctx.text_prv; + int i; + int w = prv->num_cols; + int result = 0; + for (i = 0 ; i != w ; ++i) { + result += vector_get(&prv->col_w, i); + } + return result; +} + +static int get_window_width() +{ + struct winsize w; + if (getenv("CASADM_NO_LINE_BREAK")) { + return MAX_STR_LEN; + } else if (ioctl(0, TIOCGWINSZ, &w)) { + char *cols = getenv("COLUMNS"); + int ncols; + if (cols && str_to_int(cols, NULL, &ncols)) { + return ncols; + } else { + return 80; + /* return default width of 80 + if actual width of screen + cannot be determined */ + } + } else { + return w.ws_col; + } +} + +/** + * reconstruct entire cell even if it is splitted into "rows" + * due to line breaks; + */ +static char* get_entire_cell(struct view_t *this, int i, int j) +{ + struct text_out_prv *prv = this->ctx.text_prv; + int w = table_get_width(prv->t); + int k; + int t; + int buffer_len = 0; + /* calculate buffer length required */ + for (k = j % prv->num_cols ; k < w; k += prv->num_cols) { + char *sub_cell = (char*)table_get(prv->t, i, k); + buffer_len += strnlen(sub_cell, MAX_STR_LEN); + } + + /* make sure, that buffer is allocated */ + if (!prv->cell_buffer) { + prv->cell_buffer = malloc(1 + buffer_len); + if (!prv->cell_buffer) { + return 0; + } + prv->cell_buffer_size = buffer_len; + } else if (prv->cell_buffer_size <= buffer_len) { + char *tmp = realloc(prv->cell_buffer, + 1 + buffer_len); + if (tmp) { + prv->cell_buffer = tmp; + prv->cell_buffer_size = buffer_len; + } else { + return 0; + } + } + + /* reconstruct cell */ + t = 0; + for (k = j % prv->num_cols ; k < w; k += prv->num_cols) { + char *sub_cell = (char*)table_get(prv->t, i, k); + int len = strnlen(sub_cell, MAX_STR_LEN); + if (len) { + 
memcpy_s(prv->cell_buffer + t, + buffer_len - t, + sub_cell, len); + } + t += len; + } + + prv->cell_buffer[buffer_len] = 0; + + return prv->cell_buffer; +} + +/** + * finish printing table (upon last row of table) + */ +static int finish_table(struct view_t *this) +{ + struct text_out_prv *prv = this->ctx.text_prv; + struct table_draw_characters *tc = &prv->tc; + int i, j; + int w = table_get_width(prv->t); + int h = table_get_height(prv->t); + int half_space; /* half of space around item (for center-alignment) */ + print_table_hr(this, TOP); + for (i = 0 ; i!= h ; ++i) { + + for (j = 0 ; j != w ; ++j) { + char *cell_text = (char*)table_get(prv->t, i, j); + char *cell = get_entire_cell(this, i, j); + int cell_len; + int cell_text_len = strnlen(cell_text, MAX_STR_LEN); + half_space = 0; + + if (!cell) { + return FAILURE; + } + + cell_len = strnlen(cell, MAX_STR_LEN); + + /* 0 == j % means first column */ + if (0 == j % prv->num_cols) { + putcu8(tc->outer_vert, this->outfile); + } else { + putcu8(tc->inner_vert, this->outfile); + } + /* digits are right aligned */ + + if (isnumber(cell)) { + print_spaces(this->outfile, + vector_get(&prv->col_w, + j % prv->num_cols) - + cell_text_len); + conditional_fmt(this, NUMBER_COLOR); + + /* highlight first column */ + } else if (0 == j % prv->num_cols) { + conditional_fmt(this, ATTR_BRIGHT); + + /* handle table headers specially */ + } else if (vector_get(&prv->row_types, i) == TABLE_HEADER) { + half_space = (vector_get(&prv->col_w, j) + - cell_text_len) / 2; + + if ('[' == cell[0] && ']' == cell[cell_len-1]) { + if (j < prv->num_cols) { + cell_text += 1; + cell_text_len --; + } + if (cell_text[cell_text_len - 1] == ']') { + cell_text[cell_len - 2] = 0; + cell_text_len --; + } + cell_len -= 2; + } + print_spaces(this->outfile, half_space); + conditional_fmt(this, ATTR_BRIGHT); + + } else { + + if (cell[0]=='[' && cell[cell_len-1]==']') { + conditional_fmt(this, UNIT_COLOR); + if (j < prv->num_cols) { + cell_text += 1; + 
cell_text_len --; + } + if (cell_text[cell_text_len - 1] == ']') { + cell_text[cell_len - 2] = 0; + cell_text_len --; + } + } + + } + + putc(' ', this->outfile); + if (cell_text_len != fwrite(cell_text, 1, cell_text_len, + this->outfile)) { + abort(); + } + putc(' ', this->outfile); + + if (!isnumber(cell)) { + print_spaces(this->outfile, + vector_get(&prv->col_w, + j % prv->num_cols) - + cell_text_len - half_space); + } + conditional_fmt(this, ATTR_RESET); + fflush(this->outfile); + + if (j % prv->num_cols + == prv->num_cols - 1 + || j == w - 1) { + putcu8(tc->outer_vert, this->outfile); + putc('\n', this->outfile); + /* additionally check if anything worth printing is + * in subsequent lines */ + + int k; + bool nothing_more = true; + for (k = j + 1; k != w ; ++k) { + cell = (char*)table_get(prv->t, i, k); + if (cell[0]) { + nothing_more = false; + } + } + if (nothing_more) { + break; + } + } + } + + if (vector_get(&prv->row_types, i) == TABLE_HEADER) { + print_table_hr(this, AFTER_HEADER); + + } else if (i + 1 < h && + vector_get(&prv->row_types, i+1) == TABLE_SECTION) { + print_table_hr(this, INTERNAL); + + } + + fflush(this->outfile); + } + print_table_hr(this, BOTTOM); + return 0; +} + +/** + * finish printing table (upon last row of table) + */ +static int finish_tree(struct view_t *this) +{ + struct text_out_prv *prv = this->ctx.text_prv; + struct table_draw_characters *tc = &prv->tc; + int i, j; + int w = table_get_width(prv->t); + int h = table_get_height(prv->t); + for (i = 0 ; i!= h ; ++i) { + + for (j = 0 ; j != w ; ++j) { + char *mother_cell = (char*)table_get(prv->t, i, + j % prv->num_cols); + char *cell = (char*)table_get(prv->t, i, j); + int cell_len = strnlen(cell, MAX_STR_LEN); + int out_len = cell_len; + + /* digits are right aligned */ + if (0 == j && + (vector_get(&prv->row_types, i) == TREE_LEAF)) { + if (h - 1 == i || + (i < h - 1 && + vector_get(&prv->row_types, i + 1) + == TREE_BRANCH)) { + putcu8(tc->tree_node, this->outfile); + } else { 
+ putcu8(tc->tree_node_last, + this->outfile); + } + cell_len++; + } + + /* apply bright colors for all rows except leaves */ + if (0 == j || + (vector_get(&prv->row_types, i) != TREE_LEAF)) { + conditional_fmt(this, ATTR_BRIGHT); + } + + if (3 == j) { + if (!strncmp(cell, "Active", MAX_STR_LEN) || + (!strncmp(cell, "Running", MAX_STR_LEN)) || + (!strncmp(cell, "Stopping", MAX_STR_LEN))) { + conditional_fmt(this, FG_COLOR_GREEN); + } + + if (!strncmp(cell, "Inactive", MAX_STR_LEN) || + !strncmp(cell, "Detached", MAX_STR_LEN)) { + conditional_fmt(this, FG_COLOR_RED); + conditional_fmt(this, ATTR_BRIGHT); + } + + if (!strncmp(cell, "Incomplete", MAX_STR_LEN)) { + conditional_fmt(this, FG_COLOR_YELLOW); + conditional_fmt(this, ATTR_BRIGHT); + } + } + + if (isnumber(cell)) { + conditional_fmt(this, NUMBER_COLOR); + } + + if ('/' == mother_cell[0]) { + if (vector_get(&prv->row_types, i) + == TREE_BRANCH) { + conditional_fmt(this, TREE_BRANCH_COLOR); + } else { + conditional_fmt(this, PATH_COLOR); + } + } + + if (out_len != fwrite(cell, + 1, out_len, this->outfile)) { + abort(); + } + + /* for column that is NOT last - fill spaces between + * columns accordingly */ + if (j % prv->num_cols != prv->num_cols - 1) { + print_spaces(this->outfile, + vector_get(&prv->col_w, j + % prv->num_cols) - + cell_len + 3); + } + conditional_fmt(this, ATTR_RESET); + fflush(this->outfile); + + /* for last column or last entry in a row */ + if (j % prv->num_cols == prv->num_cols - 1 + || j == w - 1) { + putc('\n', this->outfile); + /* additionally check if anything worth printing is + * in subsequent lines */ + + int k; + bool nothing_more = true; + for (k = j + 1; k != w ; ++k) { + cell = (char*)table_get(prv->t, i, k); + if (cell[0]) { + nothing_more = false; + } + } + if (nothing_more) { + break; + } + } + + /* for last entry in last column (in case of line breaks) */ + if (j % prv->num_cols == prv->num_cols - 1 + && j != w - 1) { + if (i == h - 1) { + putc(' ', this->outfile); + } else 
{ + putcu8(tc->inner_vert, + this->outfile); + } + } + } + + fflush(this->outfile); + } + return 0; +} + + + +static int print_word_break_lines(struct view_t *this, + char *word, + int word_len, + int screen_width) +{ + struct text_out_prv *prv = this->ctx.text_prv; + if (prv->col_ptr + word_len > screen_width) { + putc('\n', this->outfile); + prv->col_ptr = 1 + vector_get(&prv->col_w, 0); + print_spaces(this->outfile, prv->col_ptr); + } + prv->col_ptr += word_len; + return word_len != fwrite(word, 1, word_len, this->outfile); +} + +static void print_spaces_state(struct view_t *this, + int spaces_no, + int screen_width) +{ + struct text_out_prv *prv = this->ctx.text_prv; + int i; + + if (prv->col_ptr + spaces_no > screen_width) { + putc('\n', this->outfile); + prv->col_ptr = 1 + vector_get(&prv->col_w, 0); + print_spaces(this->outfile, prv->col_ptr); + } else { + prv->col_ptr += spaces_no; + for (i = 0; i < spaces_no; i++) { + fputc(' ', this->outfile); + } + } + return; +} + +static int print_cell_break_lines(struct view_t *this, + char *cell, + int cell_len, + int screen_width) +{ + struct text_out_prv *prv = this->ctx.text_prv; + if (prv->col_ptr + cell_len > screen_width) { + int off = 0; + int word_off = 0; + do { + if (' ' == cell[word_off + off] || + !cell[word_off + off]) { + if (off) { + print_spaces_state(this, 1, screen_width); + } + print_word_break_lines(this, cell + off, + word_off, screen_width); + off += word_off + 1; + word_off = 0; + } else { + word_off ++; + } + } while (off + word_off <= cell_len); + return 0; + } else { + prv->col_ptr += cell_len; + return cell_len != fwrite(cell, 1, cell_len, this->outfile); + } +} + +/** + * finish KV pairs... 
+ */ +static int finish_kvs(struct view_t *this) +{ + struct text_out_prv *prv = this->ctx.text_prv; + int i,j; + int screen_width = get_window_width(); + + int w = prv->num_cols; + int h = table_get_height(prv->t); + for (i = 0 ; i!= h ; ++i) { + prv->col_ptr = 0; + for (j = 0 ; j != w ; ++j) { + char *cell = table_get(prv->t, i, j); + int cell_len = strnlen(cell, MAX_STR_LEN); + if (j && !table_get(prv->t, i, j)[0]) { + continue; /* don't bother with empty strings */ + } + if (j == 0) { + conditional_fmt(this, ATTR_BRIGHT); + } else if (j==1) { + print_spaces_state(this, + 1, screen_width); + } else if (cell[0] == '[') { + print_spaces_state(this, + 1, screen_width); + conditional_fmt(this, UNIT_COLOR); + } else { + print_cell_break_lines(this, " / ", 3, + screen_width); + } + + if (isdigit(cell[0]) && (isdigit(cell[cell_len-1]) + || '%'==cell[cell_len-1])) { + conditional_fmt(this, NUMBER_COLOR); + } else if ('/'==cell[0]) { + conditional_fmt(this, PATH_COLOR); + } + if (print_cell_break_lines(this, cell, cell_len, + screen_width)) { + abort(); + } + if (j == 0) { + print_spaces_state(this, + vector_get(&prv->col_w, 0) + - cell_len, + screen_width); + } + conditional_fmt(this, ATTR_RESET); + fflush(this->outfile); + } + putc('\n', this->outfile); + fflush(this->outfile); + } + return 0; +} + + +static void set_column_widths(struct view_t *this) +{ + struct text_out_prv *prv = this->ctx.text_prv; + int i, j; + int w = table_get_width(prv->t); + int h = table_get_height(prv->t); + + vector_resize(&prv->col_w, w); + vector_zero(&prv->col_w); + for (i = 0 ; i!= h ; ++i) { + for (j = 0 ; j != w ; ++j) { + const char *cell = table_get(prv->t, i, j); + int cell_len = strnlen(cell, MAX_STR_LEN); + if (cell[0]=='[' && cell[cell_len-1]==']') { + cell_len -= 2; + } + vector_set(&prv->col_w, j % prv->num_cols, + maxi(vector_get(&prv->col_w, j % prv->num_cols), + cell_len)); + } + } +} + +/* if sum of column widths is less than width of a screen, + * attempt to shrink some + 
* @param tbl_margin space between left edge and start of cell 0 text + * @param cell_margin space between cells; + */ +static int adjust_column_widths(struct view_t *this, + int cell_margin, int tbl_margin) { + struct text_out_prv *prv = this->ctx.text_prv; + int i, j; + int w = prv->num_cols; + int h = table_get_height(prv->t); + int screen_width = get_window_width(); + int table_width; + int margins_width = (w - 1) * cell_margin + tbl_margin * 2; + int avg_width; + int above_avg_cols = 0; + int excess_width; + if (screen_width < 0) { + return 0; + } + + table_width = calculate_total_width(this); + if (table_width + margins_width <= screen_width) { + return 0; + } + /* perform magic to adjust table to a screen */ + avg_width = table_width / w; + excess_width = table_width + margins_width - screen_width; + for (i = 0 ; i != w ; ++i) { + if (vector_get(&prv->col_w, i) > avg_width) { + above_avg_cols ++; + } + } + + for (i = 0 ; i != w ; ++i) { + int this_width = vector_get(&prv->col_w, i); + if (this_width > avg_width) { + int reduce_by = excess_width / above_avg_cols; + vector_set(&prv->col_w, i, this_width - reduce_by); + above_avg_cols --; + excess_width -= reduce_by; + } + } + + /* proper widths set */ + /* now proceed with line breaking */ + for (i = 0 ; i!= h ; ++i) { + for (j = 0 ; j != w ; ++j) { + char *field = (char*)table_get(prv->t, i, j); + int k; + int col_w = vector_get(&prv->col_w, j); + int strlen_f = strnlen(field, MAX_STR_LEN); + int last_breakpoint = 0; + int num_breakpoints = 0; + int breakpoint = 0; /* offset at which line is broken */ + for (k = 0; k != strlen_f ; ++k) { + if (field[k] == '/' || + field[k] == ' ' || + field[k] == '-') { + breakpoint = k; + } + if (k - last_breakpoint >= col_w + && breakpoint > last_breakpoint) { + if (k < strlen_f && breakpoint && + table_set(prv->t, i, + j + w * + (1 + num_breakpoints), + field + breakpoint)) { + return 1; + } + field[breakpoint] = 0; + if (last_breakpoint) { + ((char*)table_get(prv->t, i, + 
j + w * num_breakpoints)) + [breakpoint - last_breakpoint] = 0; + } + last_breakpoint = breakpoint; + num_breakpoints ++; + } + + } + } + } + + table_set_width(prv->t, ((table_get_width(prv->t) + w - 1) / w ) * w); + set_column_widths(this); + return 0; +} + +static int finish_structured_data(struct view_t *this) { + struct text_out_prv *prv = this->ctx.text_prv; + int w = table_get_width(prv->t); + prv->num_cols = w; + set_column_widths(this); + + if (vector_get(&prv->row_types, 0) == KV_PAIR) { + finish_kvs(this); + } else if (vector_get(&prv->row_types, 0) == TABLE_HEADER) { + adjust_column_widths(this, 3, 4); + finish_table(this); + } else if (vector_get(&prv->row_types, 0) == TREE_HEADER) { + adjust_column_widths(this, 3, 0); + finish_tree(this); + } + + table_reset(prv->t); + vector_resize(&prv->row_types, 0); + return 0; +} + +/** + * handle single line of text in intermediate format. (already split&parsed). + * params as per interface. + */ +int text_process_row(struct view_t *this, int type, int num_fields, char *fields[]) +{ + int i; + struct text_out_prv *prv = this->ctx.text_prv; + int table_h = table_get_height(prv->t); + + switch (type) { + case FREEFORM: + if (table_h) { + finish_structured_data(this); + } + + conditional_fmt(this, ATTR_BRIGHT); + for (i = 0; i!= num_fields; ++i) { + fprintf(this->outfile, "%s", fields[i]); + fflush(this->outfile); + } + conditional_fmt(this, ATTR_RESET); + putc('\n', this->outfile); + break; + case DATA_SET: + case RECORD: + if (table_h) { + finish_structured_data(this); + if (table_h) { + putc('\n', this->outfile); + } + table_h = 0; + } + break; + default: + if (table_h && (TABLE_HEADER == type || + (vector_get(&prv->row_types, 0) == KV_PAIR + && type != KV_PAIR))) { + finish_structured_data(this); + table_h = 0; + putc('\n', this->outfile); + } + + for (i = 0; i!= num_fields; ++i) { + if (table_set(prv->t, table_h, i, fields[i])) { + return 1; + } + } + vector_push_back(&prv->row_types, type); + break; + } + + 
return 0; +} + +/** + * @handles closing file. + */ +int text_end_input(struct view_t *this) +{ + struct text_out_prv *prv = this->ctx.text_prv; + int table_h = table_get_height(prv->t); + if (table_h) { + finish_structured_data(this); + } + return 0; +} diff --git a/casadm/statistics_view_text.h b/casadm/statistics_view_text.h new file mode 100644 index 000000000..87a44d967 --- /dev/null +++ b/casadm/statistics_view_text.h @@ -0,0 +1,18 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __STATS_VIEW_TEXT +#define __STATS_VIEW_TEXT + +int text_process_row(struct view_t *this, int type, int num_fields, char *fields[]); + +int text_end_input(struct view_t *this); + +int text_construct(struct view_t *this); + +int text_destruct(struct view_t *this); + + +#endif diff --git a/casadm/table.c b/casadm/table.c new file mode 100644 index 000000000..1f28dbe89 --- /dev/null +++ b/casadm/table.c @@ -0,0 +1,231 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ +#include +#include +#include +#include "table.h" +#include "safeclib/safe_str_lib.h" +#include + +#define MIN_STR_SIZE 64 + +struct table_row +{ + int width; + int max_width; + char **cells; +}; + + +struct table +{ + int width; + int height; + int max_height; + struct table_row *r; +}; + +struct table *table_alloc() +{ + struct table *out = malloc(sizeof(*out)); + if (!out) { + return NULL; + } + + out->width = 0; + out->height = 0; + out->max_height = 0; + + out->r = 0; + return out; +} + +void table_free(struct table *t) +{ + int i, j; + if (t->r) { + for (i = 0 ; i!= t->max_height; ++i) { + if (t->r[i].cells) { + for (j = 0 ; j!= t->r[i].max_width; ++j) { + if (t->r[i].cells[j]) { + free(t->r[i].cells[j]); + } + } + free(t->r[i].cells); + } + } + free(t->r); + } + free(t); +} +int table_reset(struct table *t) +{ + int i,j; + if (t->r) { + for (i = 0 ; i!= t->max_height; ++i) { + if (t->r[i].cells) { + for 
(j = 0 ; j!= t->r[i].max_width; ++j) { + if (t->r[i].cells[j]) { + (t->r[i].cells[j])[0] = 0; + } + } + } + t->r[i].width = 0; + } + } + t->width = 0; + t->height = 0; + return 0; +} + + +int maxi(int x, int y) +{ + if (x > y) { + return x; + } else { + return y; + } +} + +char *table_get(struct table *t,int y, int x) +{ + static const char * empty=""; + if (y >= t->height || x >= t->width) { + assert(0); + abort(); + return (char*)empty; + } + + /* within assigned boundaries but without allocated boundaries */ + if (y >= t->max_height) { + return (char*)empty; + } + + if (x >= t->r[y].max_width) { + return (char*)empty; + } + + if (!t->r[y].cells) { + return (char*)empty; + } + + if (!t->r[y].cells[x]) { + return (char*)empty; + } + + return t->r[y].cells[x]; +} + +int table_set(struct table *t, int y, int x, char *c) +{ + int i; + int len = strnlen(c, MAX_STR_LEN); + if (len >= MAX_STR_LEN) { + return 1; + } + + /* step 1: ensure that space for row y is allocated */ + if (!t->r) { + t->r = calloc(sizeof(struct table_row), y + 1); + if (!t->r) { + return 1; + } + t->max_height = y + 1; + } else if (t->max_height <= y) { + struct table_row *tmp; + int new_m_h = t->max_height*2; + if (new_m_h <= y) { + new_m_h = y+1; + } + + tmp = realloc(t->r, sizeof(struct table_row)*new_m_h); + if (!tmp) { + return 1; + } + + t->r=tmp; + for (i = t->max_height; i!= new_m_h; ++i) { + t->r[i].width = t->r[i].max_width = 0; + t->r[i].cells = 0; + } + t->max_height = new_m_h; + + } /* else everything is OK */ + + /* step 2: ensure that column x within row y is allocated */ + if (!t->r[y].cells) { + t->r[y].cells = calloc(sizeof(char*), x + 1); + t->r[y].max_width = x + 1; + } else if (t->r[y].max_width <= x) { + char **tmp; + int new_m_w = t->r[y].max_width*2; + if (new_m_w <= x) { + new_m_w = x+1; + } + + tmp = realloc(t->r[y].cells, sizeof(char*)*new_m_w); + if (!tmp) { + return 1; + } + + t->r[y].cells = tmp; + memset(&tmp[t->r[y].max_width], 0, + 
sizeof(char*)*(new_m_w-t->r[y].max_width)); + t->r[y].max_width = new_m_w; + } + + /* step 3: allocate space for string to be contained in cell */ + if (t->r[y].cells[x] && len+1>MIN_STR_SIZE) { + char *tmp = realloc(t->r[y].cells[x], len+1); + if (!tmp) { + return 1; + } + t->r[y].cells[x] = tmp; + + } else if (!t->r[y].cells[x]){ + t->r[y].cells[x] = malloc(maxi(MIN_STR_SIZE,len+1)); + if (!t->r[y].cells[x]) { + return 1; + } + } + + /* step 4: actually overwrite contents of a cell */ + strncpy_s(t->r[y].cells[x], len + 1, c, len); + + /* step 5: update width and height of a table */ + + t->height = maxi(t->height, y + 1); + t->width = maxi(t->width, x + 1); + t->r[y].width = maxi(t->r[y].width, x + 1); + return 0; +} + +/** + * get last available row of table that was added either via + */ +int table_get_width(struct table *t) +{ + return t->width; +} + +int table_get_height(struct table *t) +{ + return t->height; +} + +int table_set_height(struct table *t, int h) +{ + t->height = h; + return 0; +} + + +int table_set_width(struct table *t, int h) +{ + t->width = h; + return 0; +} + diff --git a/casadm/table.h b/casadm/table.h new file mode 100644 index 000000000..b99165a2a --- /dev/null +++ b/casadm/table.h @@ -0,0 +1,58 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + + +#ifndef __TABLE_H +#define __TABLE_H + +struct table; + +/** + * setup "table" structure. + */ +struct table *table_alloc(); + +/** + * deallocate table. 
+ */ +void table_free(struct table *t); + +/** + * max value of two integers + */ +int maxi(int x, int y); + +/** + * retrieve a field of a table + */ +char *table_get(struct table *t,int y, int x); + +int table_set(struct table *t, int y, int x, char *c); + +/** + * reduce number of columns and rows to 0; + */ +int table_reset(struct table *t); + +/** + * get last available column of table that was added via table_set + */ +int table_get_width(struct table *t); + +/** + * get last available row of table that was added either via table_set or table_set_height + */ +int table_get_height(struct table *t); + +/** + * set height of a table (additional rows will contain empty strings + */ +int table_set_height(struct table *t, int h); +/** + * set with of a table (additional rows will contain empty strings + */ +int table_set_width(struct table *t, int h); + +#endif diff --git a/casadm/upgrade.c b/casadm/upgrade.c new file mode 100644 index 000000000..b6de0845a --- /dev/null +++ b/casadm/upgrade.c @@ -0,0 +1,40 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ +#include +#include +#include +#include +#include +#include +#include "cas_lib.h" +#include "cas_lib_utils.h" +#include + +extern cas_printf_t cas_printf; + +int upgrade_start() +{ + int fd; + struct kcas_upgrade cmd_info; + + if ((fd = open_ctrl_device()) == -1) { + return -1; + } + + if (run_ioctl_interruptible(fd, KCAS_IOCTL_UPGRADE, &cmd_info, + "Starting upgrade", 0, OCF_CORE_ID_INVALID) < 0) { + close(fd); + if (OCF_ERR_FLUSHING_INTERRUPTED == cmd_info.ext_err_code) { + return INTERRUPTED; + } else { + cas_printf(LOG_ERR, "Error starting upgrade\n"); + print_err(cmd_info.ext_err_code); + return FAILURE; + } + } + + close(fd); + return SUCCESS; +} diff --git a/casadm/upgrade.h b/casadm/upgrade.h new file mode 100644 index 000000000..7d9ceb520 --- /dev/null +++ b/casadm/upgrade.h @@ -0,0 +1,11 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* 
SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef _UPGRADE_H +#define _UPGRADE_H + +int upgrade_start(); + +#endif diff --git a/casadm/vt100codes.h b/casadm/vt100codes.h new file mode 100644 index 000000000..bc1cef238 --- /dev/null +++ b/casadm/vt100codes.h @@ -0,0 +1,119 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +/* General setup */ +#define RESET_DEVICE "\033c" +//! enable line wrapping +#define ENABLE_LINE_WRAP "\x1b[7h" +//! disable it +#define DISABLE_LINE_WRAP "\x1b[7l" + +/* Scrolling options. Note: there is no way to disable scrolling */ +//! Whole screen is scrolled on SCROLL_UP/SCROLL_DOWN +#define SCROLL_ENTIRE_SCREEN "\x1b[r" +//! Only rows from A to B are scrolled on SCROLL_UP/SCROLL_DOWN, anything above A or below B is not scrolled +#define SCROLL_SCREEN_REGION(A,B) "\x1b[" (A) ";" (B) "r" + +//! scroll up +#define SCROLL_UP "\x1b[M" +//! scroll down +#define SCROLL_DOWN "\x1b[D" + +//! make cursor invisible - xterm +#define HIDE_CURSOR "\x1b[?25l" + +//! restore it -xterm +#define SHOW_CURSOR "\x1b[?25h" + +/* Absolute cursor positioning. */ +//! Set cursor position to left-top position +#define CURSOR_HOME "\x1b[H" +//! Set cursor position to specific y/x (note: y = 1..height, x = 1..width) +#define CURSOR_YX "\x1b[%d;%dH" +/* Relative cursor positioning. */ +//! move cursor one position up +#define CURSOR_UP "\x1b[A" +//! move cursor n positions up +#define CURSOR_UP_N "\x1b[%dA" +//! move cursor one position down +#define CURSOR_DOWN "\x1b[B" +//! move cursor n positions down +#define CURSOR_DOWN_N "\x1b[%dB" +//! move cursor one position forward +#define CURSOR_FORWARD "\x1b[C" +//! move cursor n positions forward +#define CURSOR_FORWARD_N "\x1b[%dC" +//! move cursor one position backward +#define CURSOR_BACKWARD "\x1b[D" +//! move cursor n positions backward +#define CURSOR_BACKWARD_N "\x1b[%dD" +/* Unsave restores position after last save. */ +//! 
One cursor position may be saved +#define SAVE_CURSOR "\x1b[s" +//! and restored +#define UNSAVE_CURSOR "\x1b[u" + +/* Erase screen. */ +//! Erase whole screen +#define ERASE "\x1b[2J" +//! same as above +#define ERASE_SCREEN ERASE +//! erase above cursor +#define ERASE_UP "\x1b[1J" +//! erase below cursor +#define ERASE_DOWN "\x1b[J" + + +#define INSERT_MODE "\x1b[4h" +#define REPLACE_MODE "\x1b[4l" +/* Erase line. */ +//! erase current line +#define ERASE_LINE "\x1b[K" +//! erase current line left from the cursor +#define ERASE_START_OF_LINE "\x1b[1K" +//! erase current line right from the cursor +#define ERASE_END_OF_LINE "\x1b[K" + +/* a = one of following 23 attributes*/ +//! set specific attribute +#define SET_ATTR "\x1b[%dm" +//! if you have to set more attributes, separate them by ";" +#define AND_ATTR ";" +/*generalattributes (0-8 without 3 and 6) */ +//!resets terminal defaults +#define ATTR_RESET 0 +//!sets brighter fg color +#define ATTR_BRIGHT 1 +//!turns off bright (sets darker fg color) note: not supported by most of platforms +#define ATTR_DIM 2 +//!turns on text underline (not supported by MS Windows) +#define ATTR_UNDERSCORE 4 +//!turns on blink (Not supported by MS Windows, most of other implementations incompatible) +#define ATTR_BLINK 5 +//! 
Inverts bg and fg color (incompatible implementation on MS windows)*/ +#define ATTR_REVERSE 7 + +#define ATTR_HIDDEN 8 /*???*/ + +/*Foreground (text) colours*/ +#define FG_COLOR_BLACK 30 +#define FG_COLOR_RED 31 +#define FG_COLOR_GREEN 32 +#define FG_COLOR_YELLOW 33 +#define FG_COLOR_BLUE 34 +#define FG_COLOR_MAGENTA 35 +#define FG_COLOR_CYAN 36 +#define FG_COLOR_WHITE 37 + +/*Background colors*/ +#define BG_COLOR_BLACK 40 +#define BG_COLOR_RED 41 +#define BG_COLOR_GREEN 42 +#define BG_COLOR_YELLOW 43 +#define BG_COLOR_BLUE 44 +#define BG_COLOR_MAGENTA 45 +#define BG_COLOR_CYAN 46 +#define BG_COLOR_WHITE 47 + diff --git a/modules/CAS_VERSION_GEN b/modules/CAS_VERSION_GEN new file mode 100755 index 000000000..f9c051ac9 --- /dev/null +++ b/modules/CAS_VERSION_GEN @@ -0,0 +1,51 @@ +#!/bin/bash +# +# Copyright(c) 2012-2019 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# + +VER_FILE=CAS_VERSION + +which git > /dev/null 2>&1 +if [ $? -eq 0 ] && [ -e ../../../.git ]; then + echo "Generating ${VER_FILE} from git revision." + echo "" + VERSION=`git describe HEAD 2>/dev/null` + + CAS_VERSION_MAIN=`echo ${VERSION} | cut -d '.' -f 1 | awk '{print substr($0, 2)}'` + CAS_VERSION_MAJOR=`echo ${VERSION} | cut -d '.' -f 2 | awk '{print substr($0, 2)}'` + CAS_VERSION_MINOR=`echo ${VERSION} | cut -d '.' -f 3 | awk '{print substr($0, 2)}'` + CAS_BUILD_NO=`echo ${VERSION} | cut -d '.' -f 4 | cut -d '-' -f 1` + CAS_BUILD_FLAG=`echo ${VERSION} | cut -d '.' -f 4 | cut -s -d '-' -f 3` + + rm -f ${VER_FILE} + touch ${VER_FILE} + + echo "CAS_VERSION_MAIN=${CAS_VERSION_MAIN}" >> ${VER_FILE} + echo "CAS_VERSION_MAJOR=${CAS_VERSION_MAJOR}" >> ${VER_FILE} + echo "CAS_VERSION_MINOR=${CAS_VERSION_MINOR}" >> ${VER_FILE} + echo "CAS_BUILD_NO=${CAS_BUILD_NO}" >> ${VER_FILE} + echo "CAS_BUILD_FLAG=${CAS_BUILD_FLAG}" >> ${VER_FILE} +elif [ -f ${VER_FILE} ]; then + echo "Using existing ${VER_FILE} version file." + echo "" +else + echo "No ${VER_FILE} found. 
Preparing default version file." + echo "" + + CAS_VERSION_MAIN=19 + CAS_VERSION_MAJOR=3 + CAS_VERSION_MINOR=0 + CAS_BUILD_NO=0000`date +%m%d` + CAS_BUILD_FLAG= + + touch ${VER_FILE} + + echo "CAS_VERSION_MAIN=${CAS_VERSION_MAIN}" >> ${VER_FILE} + echo "CAS_VERSION_MAJOR=${CAS_VERSION_MAJOR}" >> ${VER_FILE} + echo "CAS_VERSION_MINOR=${CAS_VERSION_MINOR}" >> ${VER_FILE} + echo "CAS_BUILD_NO=${CAS_BUILD_NO}" >> ${VER_FILE} + echo "CAS_BUILD_FLAG=${CAS_BUILD_FLAG}" >> ${VER_FILE} +fi + +cat ${VER_FILE} diff --git a/modules/Makefile b/modules/Makefile new file mode 100644 index 000000000..87d2b06c7 --- /dev/null +++ b/modules/Makefile @@ -0,0 +1,76 @@ +# +# Copyright(c) 2012-2019 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# +# If KERNELRELEASE is defined, we've been invoked from the +# kernel build system and can use its language. +ifneq ($(KERNELRELEASE),) + +include $(M)/config.mk + +obj-y += cas_cache/ +obj-y += cas_disk/ + +# Otherwise we were called directly from the command +# line; invoke the kernel build system. 
+else + +VERSION_FILE=$(PWD)/CAS_VERSION + +OCFDIR=$(PWD)/../ocf +KERNEL_DIR ?= "/lib/modules/$(shell uname -r)/build" +PWD=$(shell pwd) +KERNEL_VERSION := $(shell uname -r) +MODULES_DIR=/lib/modules/$(KERNEL_VERSION)/extra + +DISK_MODULE = cas_disk +CACHE_MODULE = cas_cache + +DEPMOD:=$(shell which depmod) +RMMOD :=$(shell which rmmod) +MODPROBE:=$(shell which modprobe) + +all: default + +$(VERSION_FILE): + ./CAS_VERSION_GEN + +# Extra targets and file configuration +ifneq ($(wildcard $(PWD)/extra.mk),) +include $(PWD)/extra.mk +else +sync distsync: +endif + +default: $(VERSION_FILE) sync + cd $(KERNEL_DIR) && $(MAKE) M=$(PWD) modules + +clean: + cd $(KERNEL_DIR) && make M=$(PWD) clean + +distclean: clean distsync + +install: + @echo "Installing Open-CAS modules" + @install -m 755 -d $(MODULES_DIR) + @install -m 744 cas_disk/$(DISK_MODULE).ko $(MODULES_DIR)/$(DISK_MODULE).ko + @install -m 744 cas_cache/$(CACHE_MODULE).ko $(MODULES_DIR)/$(CACHE_MODULE).ko + + @$(DEPMOD) + @$(MODPROBE) $(CACHE_MODULE) + +uninstall: + @echo "Uninstalling Open-CAS modules" + @$(RMMOD) $(CACHE_MODULE) + @$(RMMOD) $(DISK_MODULE) + + @rm $(MODULES_DIR)/$(CACHE_MODULE).ko + @rm $(MODULES_DIR)/$(DISK_MODULE).ko + + @$(DEPMOD) + +reinstall: uninstall install + +.PHONY: all default clean distclean sync distsync install uninstall + +endif diff --git a/modules/README b/modules/README new file mode 100644 index 000000000..c57f91a06 --- /dev/null +++ b/modules/README @@ -0,0 +1,17 @@ +Open CAS accelerates Linux applications by caching active (hot) data to +a local flash device inside servers. Open CAS implements caching at the +server level, utilizing local high-performance flash media as the cache drive +media inside the application server as close as possible to the CPU, thus +reducing storage latency as much as possible. +The Open Cache Acceleration Software installs into the GNU/Linux operating +system itself, as a kernel module. 
The nature of the integration provides a +cache solution that is transparent to users and applications, and your +existing storage infrastructure. No storage migration effort or application +changes are required. + +Open CAS is distributed on Dual BSD-2-Clause-Patent/GPLv2 license (see +https://opensource.org/licenses/BSDplusPatent and +https://opensource.org/licenses/GPL-2.0 for full license texts). + +Open CAS uses Safe string library (safeclib) that is MIT licensed. + diff --git a/modules/cas_cache/.gitignore b/modules/cas_cache/.gitignore new file mode 100644 index 000000000..2eee3238c --- /dev/null +++ b/modules/cas_cache/.gitignore @@ -0,0 +1,3 @@ +include/ +src/ + diff --git a/modules/cas_cache/Makefile b/modules/cas_cache/Makefile new file mode 100644 index 000000000..c6dd8a9f0 --- /dev/null +++ b/modules/cas_cache/Makefile @@ -0,0 +1,10 @@ +# +# Copyright(c) 2012-2019 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# +include $(M)/config.mk + +obj-m := cas_cache.o + +cas_cache-c = $(shell find $(M)/cas_cache -name \*.c) +cas_cache-objs = $(patsubst $(M)/cas_cache/%.c,%.o,$(cas_cache-c)) diff --git a/modules/cas_cache/cas_cache.h b/modules/cas_cache/cas_cache.h new file mode 100644 index 000000000..dda492832 --- /dev/null +++ b/modules/cas_cache/cas_cache.h @@ -0,0 +1,97 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __CAS_CACHE_H__ +#define __CAS_CACHE_H__ + +#include "ocf/ocf.h" +#include "ocf_env.h" + +#include +#include + +#include "linux_kernel_version.h" +#include "layer_upgrade.h" +#include "control.h" +#include "layer_cache_management.h" +#include "service_ui_ioctl.h" +#include "utils/cas_cache_utils.h" +#include "volume/vol_blk_utils.h" +#include "classifier.h" +#include "context.h" +#include + +#define CAS_KERN_EMERG KERN_EMERG OCF_PREFIX_SHORT +#define CAS_KERN_ALERT KERN_ALERT OCF_PREFIX_SHORT +#define CAS_KERN_CRIT KERN_CRIT OCF_PREFIX_SHORT +#define 
CAS_KERN_ERR KERN_ERR OCF_PREFIX_SHORT +#define CAS_KERN_WARNING KERN_WARNING OCF_PREFIX_SHORT +#define CAS_KERN_NOTICE KERN_NOTICE OCF_PREFIX_SHORT +#define CAS_KERN_INFO KERN_INFO OCF_PREFIX_SHORT +#define CAS_KERN_DEBUG KERN_DEBUG OCF_PREFIX_SHORT + +#ifndef SECTOR_SHIFT +#define SECTOR_SHIFT 9 +#endif + +#ifndef SECTOR_SIZE +#define SECTOR_SIZE (1< + +/* Kernel log prefix */ +#define CAS_CLS_LOG_PREFIX OCF_PREFIX_SHORT"[Classifier]" + +/* Production version logs */ +#define CAS_CLS_MSG(severity, format, ...) \ + printk(severity CAS_CLS_LOG_PREFIX " " format, ##__VA_ARGS__); + +/* Set to 1 to enable debug logs */ +#define CAS_CLASSIFIER_CLS_DEBUG 0 + +#if 1 == CAS_CLASSIFIER_CLS_DEBUG +/* Debug log */ +#define CAS_CLS_DEBUG_MSG(format, ...) \ + CAS_CLS_MSG(KERN_INFO, format, ##__VA_ARGS__) +/* Trace log */ +#define CAS_CLS_DEBUG_TRACE(format, ...) \ + trace_printk(format, ##__VA_ARGS__) + +#else +#define CAS_CLS_DEBUG_MSG(format, ...) +#define CAS_CLS_DEBUG_TRACE(format, ...) +#endif + +/* Done condition test - always accepts and stops evaluation */ +static cas_cls_eval_t _cas_cls_done_test(struct cas_classifier *cls, + struct cas_cls_condition *c, struct cas_cls_io *io, + ocf_part_id_t part_id) +{ + cas_cls_eval_t ret = {.yes = 1, .stop = 1}; + return ret; +} + +/* Metadata condition test */ +static cas_cls_eval_t _cas_cls_metadata_test(struct cas_classifier *cls, + struct cas_cls_condition *c, struct cas_cls_io *io, + ocf_part_id_t part_id) +{ + if (!io->page) + return cas_cls_eval_no; + + if (PageAnon(io->page)) + return cas_cls_eval_no; + + if (PageSlab(io->page) || PageCompound(io->page)) { + /* A filesystem issues IO on pages that does not belongs + * to the file page cache. 
It means that it is a + * part of metadata + */ + return cas_cls_eval_yes; + } + + if (!io->page->mapping) { + /* XFS case, page are allocated internally and do not + * have references into inode + */ + return cas_cls_eval_yes; + } + + if (!io->inode) + return cas_cls_eval_no; + + if (S_ISBLK(io->inode->i_mode)) { + /* EXT3 and EXT4 case. Metadata IO is performed into pages + * of block device cache + */ + return cas_cls_eval_yes; + } + + if (S_ISDIR(io->inode->i_mode)) { + return cas_cls_eval_yes; + } + + return cas_cls_eval_no; +} + +/* Direct I/O condition test function */ +static cas_cls_eval_t _cas_cls_direct_test(struct cas_classifier *cls, + struct cas_cls_condition *c, struct cas_cls_io *io, + ocf_part_id_t part_id) +{ + if (!io->page) + return cas_cls_eval_no; + + if (PageAnon(io->page)) + return cas_cls_eval_yes; + + return cas_cls_eval_no; +} + +/* Generic condition constructor for conditions without operands (e.g. direct, + * metadata) */ +static int _cas_cls_generic_ctr(struct cas_classifier *cls, + struct cas_cls_condition *c, char *data) +{ + if (data) { + CAS_CLS_MSG(KERN_ERR, "Unexpected operand in condition\n"); + return -EINVAL; + } + return 0; +} + +/* Generic condition destructor */ +static void _cas_cls_generic_dtr(struct cas_classifier *cls, + struct cas_cls_condition *c) +{ + if (c->context) + kfree(c->context); + c->context = NULL; +} + +/* Numeric condition constructor. @data is expected to contain either + * plain number string or range specifier (e.g. "gt:4096"). 
*/ +static int _cas_cls_numeric_ctr(struct cas_classifier* cls, + struct cas_cls_condition *c, char *data) +{ + struct cas_cls_numeric *ctx; + int result; + char *ptr; + + if (!data || strlen(data) == 0) { + CAS_CLS_MSG(KERN_ERR, "Missing numeric condition operand\n"); + return -EINVAL; + } + + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->operator = cas_cls_numeric_eq; + + ptr = strpbrk(data, ":"); + if (ptr) { + /* Terminate sub-string containing arithmetic operator */ + *ptr = '\0'; + ++ptr; + + if (!strcmp(data, "eq")) { + ctx->operator = cas_cls_numeric_eq; + } else if (!strcmp(data, "ne")) { + ctx->operator = cas_cls_numeric_ne; + } else if (!strcmp(data, "lt")) { + ctx->operator = cas_cls_numeric_lt; + } else if (!strcmp(data, "gt")) { + ctx->operator = cas_cls_numeric_gt; + } else if (!strcmp(data, "le")) { + ctx->operator = cas_cls_numeric_le; + } else if (!strcmp(data, "ge")) { + ctx->operator = cas_cls_numeric_ge; + } else { + CAS_CLS_MSG(KERN_ERR, "Invalid numeric operator \n"); + result = -EINVAL; + goto error; + } + + } else { + /* Plain number case */ + ptr = data; + } + + result = kstrtou64(ptr, 10, &ctx->v_u64); + if (result) { + CAS_CLS_MSG(KERN_ERR, "Invalid numeric operand\n"); + goto error; + } + + CAS_CLS_DEBUG_MSG("\t\t - Using operator %d with value %llu\n", + ctx->operator, ctx->v_u64); + + c->context = ctx; + return 0; + +error: + kfree(ctx); + return result; +} + +/* Unsigned int numeric test function */ +static cas_cls_eval_t _cas_cls_numeric_test_u( + struct cas_cls_condition *c, uint64_t val) +{ + struct cas_cls_numeric *ctx = c->context; + + switch (ctx->operator) { + case cas_cls_numeric_eq: + return val == ctx->v_u64 ? cas_cls_eval_yes : cas_cls_eval_no; + case cas_cls_numeric_ne: + return val != ctx->v_u64 ? cas_cls_eval_yes : cas_cls_eval_no; + case cas_cls_numeric_lt: + return val < ctx->v_u64 ? cas_cls_eval_yes : cas_cls_eval_no; + case cas_cls_numeric_gt: + return val > ctx->v_u64 ? 
cas_cls_eval_yes : cas_cls_eval_no; + case cas_cls_numeric_le: + return val <= ctx->v_u64 ? cas_cls_eval_yes : cas_cls_eval_no; + case cas_cls_numeric_ge: + return val >= ctx->v_u64 ? cas_cls_eval_yes : cas_cls_eval_no; + } + + return cas_cls_eval_no; +} + +/* Io class test function */ +static cas_cls_eval_t _cas_cls_io_class_test(struct cas_classifier *cls, + struct cas_cls_condition *c, struct cas_cls_io *io, + ocf_part_id_t part_id) +{ + + return _cas_cls_numeric_test_u(c, part_id); +} + +/* File size test function */ +static cas_cls_eval_t _cas_cls_file_size_test( + struct cas_classifier *cls, struct cas_cls_condition *c, + struct cas_cls_io *io, ocf_part_id_t part_id) +{ + if (!io->inode) + return cas_cls_eval_no; + + if (S_ISBLK(io->inode->i_mode)) + return cas_cls_eval_no; + + if (!S_ISREG(io->inode->i_mode)) + return cas_cls_eval_no; + + return _cas_cls_numeric_test_u(c, i_size_read(io->inode)); +} + +/* Resolve path to inode */ +static void _cas_cls_directory_resolve(struct cas_classifier *cls, + struct cas_cls_directory *ctx) +{ + struct path path; + struct inode *inode; + int error; + int o_res; + unsigned long o_ino; + + o_res = ctx->resolved; + o_ino = ctx->i_ino; + + error = kern_path(ctx->pathname, LOOKUP_FOLLOW, &path); + if (error) { + ctx->resolved = 0; + if (o_res) { + CAS_CLS_DEBUG_MSG("Removed inode resolution for %s\n", + ctx->pathname); + } + return; + } + + inode = path.dentry->d_inode; + ctx->i_ino = inode->i_ino; + ctx->resolved = 1; + path_put(&path); + + if (!o_res) { + CAS_CLS_DEBUG_MSG("Resolved %s to inode: %lu\n", ctx->pathname, + ctx->i_ino); + } else if (o_ino != ctx->i_ino) { + CAS_CLS_DEBUG_MSG("Changed inode resolution for %s: %lu => %lu" + "\n", ctx->pathname, o_ino, ctx->i_ino); + } +} + +/* Inode resolving work entry point */ +static void _cas_cls_directory_resolve_work(struct work_struct *work) +{ + struct cas_cls_directory *ctx; + + ctx = container_of(work, struct cas_cls_directory, d_work.work); + + 
_cas_cls_directory_resolve(ctx->cls, ctx); + + queue_delayed_work(ctx->cls->wq, &ctx->d_work, + msecs_to_jiffies(ctx->resolved ? 5000 : 1000)); +} + +/* Get unaliased dentry for given dir inode */ +static struct dentry *_cas_cls_dir_get_inode_dentry(struct inode *inode) +{ + struct dentry *d = NULL, *iter; + ALIAS_NODE_TYPE *pos; /* alias list current element */ + + if (DENTRY_LIST_EMPTY(&inode->i_dentry)) + return NULL; + + spin_lock(&inode->i_lock); + + if (S_ISDIR(inode->i_mode)) + goto unlock; + + INODE_FOR_EACH_DENTRY(pos, &inode->i_dentry) { + iter = ALIAS_NODE_TO_DENTRY(pos); + spin_lock(&iter->d_lock); + if (!d_unhashed(iter)) + d = iter; + spin_unlock(&d->d_lock); + if (d) + break; + } + +unlock: + spin_unlock(&inode->i_lock); + return d; +} + +/* Directory condition test function */ +static cas_cls_eval_t _cas_cls_directory_test( + struct cas_classifier *cls, struct cas_cls_condition *c, + struct cas_cls_io *io, ocf_part_id_t part_id) +{ + struct cas_cls_directory *ctx; + struct inode *inode, *p_inode; + struct dentry *dentry, *p_dentry; + + ctx = c->context; + inode = io->inode; + + if (!inode || !ctx->resolved) + return cas_cls_eval_no; + + /* I/O target inode dentry */ + dentry = _cas_cls_dir_get_inode_dentry(inode); + if (!dentry) + return cas_cls_eval_no; + + /* Walk up directory tree starting from I/O destination + * dir until current dir inode matches condition inode or top + * directory is reached. 
*/ + while (inode) { + if (inode->i_ino == ctx->i_ino) + return cas_cls_eval_yes; + spin_lock(&dentry->d_lock); + p_dentry = dentry->d_parent; + if (!p_dentry) { + spin_unlock(&dentry->d_lock); + return cas_cls_eval_no; + } + p_inode = p_dentry->d_inode; + spin_unlock(&dentry->d_lock); + if (p_inode != inode) { + inode = p_inode; + dentry = p_dentry; + } else { + inode = NULL; + } + } + + return cas_cls_eval_no; +} + +/* Directory condition constructor */ +static int _cas_cls_directory_ctr(struct cas_classifier *cls, + struct cas_cls_condition *c, char *data) +{ + struct cas_cls_directory *ctx; + + if (!data || strlen(data) == 0) { + CAS_CLS_MSG(KERN_ERR, "Missing directory specifier\n"); + return -EINVAL; + } + + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->cls = cls; + ctx->resolved = 0; + ctx->pathname = kstrdup(data, GFP_KERNEL); + if (!ctx->pathname) { + kfree(ctx); + return -ENOMEM; + } + + INIT_DELAYED_WORK(&ctx->d_work, _cas_cls_directory_resolve_work); + queue_delayed_work(cls->wq, &ctx->d_work, + msecs_to_jiffies(10)); + + c->context = ctx; + + return 0; +} + +/* Directory condition destructor */ +static void _cas_cls_directory_dtr(struct cas_classifier *cls, + struct cas_cls_condition *c) +{ + struct cas_cls_directory *ctx; + ctx = c->context; + + if (!ctx) + return; + + cancel_delayed_work_sync(&ctx->d_work); + kfree(ctx->pathname); + kfree(ctx); +} + +/* Array of condition handlers */ +static struct cas_cls_condition_handler _handlers[] = { + { "done", _cas_cls_done_test, _cas_cls_generic_ctr }, + { "metadata", _cas_cls_metadata_test, _cas_cls_generic_ctr }, + { "direct", _cas_cls_direct_test, _cas_cls_generic_ctr }, + { "io_class", _cas_cls_io_class_test, _cas_cls_numeric_ctr, + _cas_cls_generic_dtr }, + { "file_size", _cas_cls_file_size_test, _cas_cls_numeric_ctr, + _cas_cls_generic_dtr }, + { "directory", _cas_cls_directory_test, _cas_cls_directory_ctr, + _cas_cls_directory_dtr }, + { NULL } +}; + +/* Get condition 
handler for condition string token */ +static struct cas_cls_condition_handler *_cas_cls_lookup_handler( + const char *token) +{ + struct cas_cls_condition_handler *h = _handlers; + + while (h->token) { + if (strcmp(h->token, token) == 0) + return h; + h++; + } + + return NULL; +} + +/* Deallocate condition */ +static void _cas_cls_free_condition(struct cas_classifier *cls, + struct cas_cls_condition *c) +{ + if (c->handler->dtr) + c->handler->dtr(cls, c); + kfree(c); +} + +/* Allocate condition */ +static struct cas_cls_condition * _cas_cls_create_condition( + struct cas_classifier *cls, const char *token, + char *data, int l_op) +{ + struct cas_cls_condition_handler *h; + struct cas_cls_condition *c; + int result; + + h = _cas_cls_lookup_handler(token); + if (!h) { + CAS_CLS_DEBUG_MSG("Cannot find handler for condition" + " %s\n", token); + return ERR_PTR(-ENOENT); + } + + c = kmalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + c->handler = h; + c->context = NULL; + c->l_op = l_op; + + if (c->handler->ctr) { + result = c->handler->ctr(cls, c, data); + if (result) { + kfree(c); + return ERR_PTR(result); + } + } + + CAS_CLS_DEBUG_MSG("\t\t - Created condition %s\n", token); + + return c; +} + +/* Read single condition from text input and return cas_cls_condition + * representation. *rule pointer is advanced to point to next condition. + * Input @rule string is modified to speed up parsing (selected bytes are + * overwritten with 0). + * + * *l_op contains logical operator from previous condition and gets overwritten + * with operator read from currently parsed condition. + * + * Returns pointer to condition if successful. + * Returns NULL if no more conditions in string. + * Returns error pointer in case of syntax or runtime error. + */ +static struct cas_cls_condition *_cas_cls_parse_condition( + struct cas_classifier *cls, char **rule, + enum cas_cls_logical_op *l_op) +{ + char *token = *rule; /* Condition token substring (e.g. 
file_size) */ + char *operand = NULL; /* Operand substring (e.g. "lt:4096" or path) */ + char *ptr; /* Current position in input string */ + char *last = token; /* Last seen substring in condition */ + char op = 'X'; /* Logical operator at the end of condition */ + struct cas_cls_condition *c; /* Output condition */ + + if (**rule == '\0') { + /* Empty condition */ + return NULL; + } + + ptr = strpbrk(*rule, ":&|"); + if (!ptr) { + /* No operands in condition (e.g. "metadata"), no logical + * operators following condition - we're done with parsing. */ + goto create; + } + + if (*ptr == ':') { + /* Operand found - terminate token string and move forward. */ + *ptr = '\0'; + ptr += 1; + operand = ptr; + last = ptr; + + ptr = strpbrk(ptr, "&|"); + if (!ptr) { + /* No operator past condition - create rule and exit */ + goto create; + } + } + + /* Remember operator value and zero target byte to terminate previous + * string (token or operand) */ + op = *ptr; + *ptr = '\0'; + +create: + c = _cas_cls_create_condition(cls, token, operand, *l_op); + *l_op = (op == '|' ? cas_cls_logical_or : cas_cls_logical_and); + + /* Set *rule to character past current condition and logical operator */ + if (ptr) { + /* Set pointer for next iteration */ + *rule = ptr + 1; + } else { + /* Set pointer to terminating zero */ + *rule = last + strlen(last); + } + + return c; +} + +/* Parse all conditions in rule text description. 
@rule might be overwritten */ +static int _cas_cls_parse_conditions(struct cas_classifier *cls, + struct cas_cls_rule *r, char *rule) +{ + char *start; + struct cas_cls_condition *c; + enum cas_cls_logical_op l_op = cas_cls_logical_or; + + start = rule; + for (;;) { + c = _cas_cls_parse_condition(cls, &start, &l_op); + if (IS_ERR(c)) + return PTR_ERR(c); + if (!c) + break; + + list_add_tail(&c->list, &r->conditions); + } + + return 0; +} + +static struct cas_classifier* cas_get_classifier(ocf_cache_t cache) +{ + struct cache_priv *cache_priv = ocf_cache_get_priv(cache); + ENV_BUG_ON(!cache_priv); + return cache_priv->classifier; +} + +static void cas_set_classifier(ocf_cache_t cache, + struct cas_classifier* cls) +{ + struct cache_priv *cache_priv = ocf_cache_get_priv(cache); + ENV_BUG_ON(!cache_priv); + cache_priv->classifier = cls; +} + +void _cas_cls_rule_destroy(struct cas_classifier *cls, + struct cas_cls_rule *r) +{ + struct list_head *item, *n; + struct cas_cls_condition *c = NULL; + + if (!r) + return; + + list_for_each_safe(item, n, &r->conditions) { + c = list_entry(item, struct cas_cls_condition, list); + list_del(item); + _cas_cls_free_condition(cls, c); + } + + kfree(r); +} + +/* Destroy rule */ +void cas_cls_rule_destroy(ocf_cache_t cache, struct cas_cls_rule *r) +{ + struct cas_classifier *cls = cas_get_classifier(cache); + BUG_ON(!cls); + _cas_cls_rule_destroy(cls, r); +} + +/* Create rule from text description. 
@rule might be overwritten */ +static struct cas_cls_rule *_cas_cls_rule_create(struct cas_classifier *cls, + ocf_part_id_t part_id, char *rule) +{ + struct cas_cls_rule *r; + int result; + + if (part_id == 0 || rule[0] == '\0') + return NULL; + + r = kmalloc(sizeof(*r), GFP_KERNEL); + if (!r) + return ERR_PTR(-ENOMEM); + + r->part_id = part_id; + INIT_LIST_HEAD(&r->conditions); + result = _cas_cls_parse_conditions(cls, r, rule); + if (result) { + _cas_cls_rule_destroy(cls, r); + return ERR_PTR(result); + } + + return r; +} + +/* Update rule associated with given io class */ +void cas_cls_rule_apply(ocf_cache_t cache, + ocf_part_id_t part_id, struct cas_cls_rule *new) +{ + struct cas_classifier *cls; + struct cas_cls_rule *old = NULL, *elem; + struct list_head *item, *_n; + + cls = cas_get_classifier(cache); + BUG_ON(!cls); + + write_lock(&cls->lock); + + /* Walk through list of rules in reverse order (tail to head), visiting + * rules from high to low part_id */ + list_for_each_prev_safe(item, _n, &cls->rules) { + elem = list_entry(item, struct cas_cls_rule, list); + + if (elem->part_id == part_id) { + old = elem; + list_del(item); + } + + if (elem->part_id < part_id) + break; + } + + /* Insert new element past loop cursor */ + if (new) + list_add(&new->list, item); + + write_unlock(&cls->lock); + + _cas_cls_rule_destroy(cls, old); + + if (old) + CAS_CLS_DEBUG_MSG("Removed rule for class %d\n", part_id); + if (new) + CAS_CLS_DEBUG_MSG("New rule for for class %d\n", part_id); + + return; +} + +/* + * Translate classification rule error from linux error code to CAS error code. + * Internal classifier functions use PTR_ERR / ERR_PTR macros to propagate + * error in pointers. These macros do not work well with CAS error codes, so + * this function is used to form fine-grained CAS error code when returning + * from classifier management function. 
+ */ +static int _cas_cls_rule_err_to_cass_err(int err) +{ + switch (err) { + case -ENOENT: + return KCAS_ERR_CLS_RULE_UNKNOWN_CONDITION; + case -EINVAL: + return KCAS_ERR_CLS_RULE_INVALID_SYNTAX; + default: + return err; + } +} + +/* Create and apply classification rule for given class id */ +static int _cas_cls_rule_init(ocf_cache_t cache, ocf_part_id_t part_id) +{ + struct cas_classifier *cls; + struct ocf_io_class_info *info; + struct cas_cls_rule *r; + int result; + + cls = cas_get_classifier(cache); + if (!cls) + return -EINVAL; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + result = ocf_cache_io_class_get_info(cache, part_id, info); + if (result) { + if (result == -OCF_ERR_IO_CLASS_NOT_EXIST) + result = 0; + goto exit; + } + + if (strnlen(info->name, sizeof(info->name)) == sizeof(info->name)) { + CAS_CLS_MSG(KERN_ERR, "IO class name not null terminated\n"); + result = -EINVAL; + goto exit; + } + + r = _cas_cls_rule_create(cls, part_id, info->name); + if (IS_ERR(r)) { + result = _cas_cls_rule_err_to_cass_err(PTR_ERR(r)); + goto exit; + } + + cas_cls_rule_apply(cache, part_id, r); + +exit: + kfree(info); + return result; +} + +/* Create classification rule from text description */ +int cas_cls_rule_create(ocf_cache_t cache, + ocf_part_id_t part_id, const char* rule, + struct cas_cls_rule **cls_rule) +{ + struct cas_cls_rule *r = NULL; + struct cas_classifier *cls; + char *_rule; + int ret; + + if (!cls_rule) + return -EINVAL; + + cls = cas_get_classifier(cache); + if (!cls) + return -EINVAL; + + if (strnlen(rule, OCF_IO_CLASS_NAME_MAX) == OCF_IO_CLASS_NAME_MAX) { + CAS_CLS_MSG(KERN_ERR, "IO class name not null terminated\n"); + return -EINVAL; + } + + /* Make description copy as _cas_cls_rule_create might modify input + * string */ + _rule = kstrdup(rule, GFP_KERNEL); + if (!_rule) + return -ENOMEM; + + r = _cas_cls_rule_create(cls, part_id, _rule); + if (IS_ERR(r)) + ret = _cas_cls_rule_err_to_cass_err(PTR_ERR(r)); + else { 
+ CAS_CLS_DEBUG_MSG("Created rule: %s => %d\n", rule, part_id); + *cls_rule = r; + ret = 0; + } + + kfree(_rule); + return ret; +} + +/* Deinitialize classifier and remove rules */ +void cas_cls_deinit(ocf_cache_t cache) +{ + struct cas_classifier *cls; + struct list_head *item, *n; + struct cas_cls_rule *r = NULL; + + cls = cas_get_classifier(cache); + ENV_BUG_ON(!cls); + + list_for_each_safe(item, n, &cls->rules) { + r = list_entry(item, struct cas_cls_rule, list); + list_del(item); + _cas_cls_rule_destroy(cls, r); + } + + destroy_workqueue(cls->wq); + + kfree(cls); + cas_set_classifier(cache, NULL); + + CAS_CLS_MSG(KERN_INFO, "Deinitialized IO classifier\n"); + + return; +} + +/* Initialize classifier context */ +static struct cas_classifier *_cas_cls_init(ocf_cache_t cache) +{ + struct cas_classifier *cls; + + cls = kzalloc(sizeof(*cls), GFP_KERNEL); + if (!cls) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&cls->rules); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) + cls->wq = alloc_workqueue("kcas_clsd", WQ_UNBOUND | WQ_FREEZABLE, 1); +#else + cls->wq = create_singlethread_workqueue("kcas_clsd"); +#endif + if (!cls->wq) { + kfree(cls); + return ERR_PTR(-ENOMEM); + } + + rwlock_init(&cls->lock); + + CAS_CLS_MSG(KERN_INFO, "Initialized IO classifier\n"); + + return cls; +} + +/* Initialize classifier and create rules for existing I/O classes */ +int cas_cls_init(ocf_cache_t cache) +{ + struct cas_classifier *cls; + unsigned result = 0; + unsigned i; + + cls = _cas_cls_init(cache); + if (IS_ERR(cls)) + return PTR_ERR(cls); + cas_set_classifier(cache, cls); + + /* Update rules for all I/O classes except 0 - this is default for all + * unclassified I/O */ + for (i = 1; i < OCF_IO_CLASS_MAX; i++) { + result = _cas_cls_rule_init(cache, i); + if (result) + break; + } + + if (result) + cas_cls_deinit(cache); + + return result; +} + +/* Determine whether io matches rule */ +static cas_cls_eval_t cas_cls_process_rule(struct cas_classifier *cls, + struct 
cas_cls_rule *r, struct cas_cls_io *io, + ocf_part_id_t *part_id) +{ + struct list_head *item; + struct cas_cls_condition *c; + cas_cls_eval_t ret = cas_cls_eval_no, rr; + + CAS_CLS_DEBUG_TRACE(" Processing rule for class %d\n", r->part_id); + list_for_each(item, &r->conditions) { + + c = list_entry(item, struct cas_cls_condition, list); + + if (!ret.yes && c->l_op == cas_cls_logical_and) + break; + + rr = c->handler->test(cls, c, io, *part_id); + CAS_CLS_DEBUG_TRACE(" Processing condition %s => %d, stop:%d " + "(l_op: %d)\n", c->handler->token, rr.yes, + rr.stop, (int)c->l_op); + + ret.yes = (c->l_op == cas_cls_logical_and) ? + rr.yes && ret.yes : + rr.yes || ret.yes; + ret.stop = rr.stop; + + if (ret.stop) + break; + } + + CAS_CLS_DEBUG_TRACE(" Rule %d output => %d stop: %d\n", r->part_id, + ret.yes, ret.stop); + + return ret; +} + +/* Fill in cas_cls_io for given bio - it is assumed that ctx is + * zeroed upon entry */ +static void _cas_cls_get_bio_context(struct bio *bio, + struct cas_cls_io *ctx) +{ + struct page *page = NULL; + + if (!bio) + return; + ctx->bio = bio; + + if (!SEGMENT_BVEC(bio_iovec(bio))) + return; + + page = bio_page(bio); + + if (!page) + return; + ctx->page = page; + + if (PageAnon(page)) + return; + + if (PageSlab(page) || PageCompound(page)) + return; + + if (!page->mapping) + return; + + ctx->inode = page->mapping->host; + + return; +} + +/* Determine I/O class for bio */ +ocf_part_id_t cas_cls_classify(ocf_cache_t cache, struct bio *bio) +{ + struct cas_classifier *cls; + struct cas_cls_io io = {}; + struct list_head *item; + struct cas_cls_rule *r; + ocf_part_id_t part_id = 0; + cas_cls_eval_t ret; + + cls = cas_get_classifier(cache); + ENV_BUG_ON(!cls); + + _cas_cls_get_bio_context(bio, &io); + + read_lock(&cls->lock); + CAS_CLS_DEBUG_TRACE("%s\n", "Starting processing"); + list_for_each(item, &cls->rules) { + r = list_entry(item, struct cas_cls_rule, list); + ret = cas_cls_process_rule(cls, r, &io, &part_id); + if (ret.yes) + 
part_id = r->part_id; + if (ret.stop) + break; + } + read_unlock(&cls->lock); + + return part_id; +} + diff --git a/modules/cas_cache/classifier.h b/modules/cas_cache/classifier.h new file mode 100644 index 000000000..1df063255 --- /dev/null +++ b/modules/cas_cache/classifier.h @@ -0,0 +1,33 @@ +/* +* Copyright(c) 2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __CLASSIFIER_H__ +#define __CLASSIFIER_H__ + +struct cas_cls_rule; + +/* Initialize classifier and create rules for existing I/O classes */ +int cas_cls_init(ocf_cache_t cache); + +/* Deinitialize classifier and remove rules */ +void cas_cls_deinit(ocf_cache_t cache); + +/* Allocate and initialize classification rule */ +int cas_cls_rule_create(ocf_cache_t cache, + ocf_part_id_t part_id, const char* rule, + struct cas_cls_rule **cls_rule); + +/* Deinit classification rule */ +void cas_cls_rule_destroy(ocf_cache_t cache, struct cas_cls_rule *r); + +/* Bind classification rule to io class */ +void cas_cls_rule_apply(ocf_cache_t cache, ocf_part_id_t part_id, + struct cas_cls_rule *r); + +/* Determine I/O class for bio */ +ocf_part_id_t cas_cls_classify(ocf_cache_t cache, struct bio *bio); + + +#endif diff --git a/modules/cas_cache/classifier_defs.h b/modules/cas_cache/classifier_defs.h new file mode 100644 index 000000000..0ce380996 --- /dev/null +++ b/modules/cas_cache/classifier_defs.h @@ -0,0 +1,139 @@ +/* +* Copyright(c) 2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __CLASSIFIER_DEFS_H__ +#define __CLASSIFIER_DEFS_H__ + +/* Rule matches 1:1 with io class. It contains multiple conditions with + * associated logical operator (and/or) */ +struct cas_cls_rule { + /* Rules list element */ + struct list_head list; + + /* Associated partition id */ + ocf_part_id_t part_id; + + /* Conditions for this rule */ + struct list_head conditions; +}; + +/* Classifier context - one per cache instance. 
*/ +struct cas_classifier { + /* Rules list head */ + struct list_head rules; + + /* Directory inode resolving workqueue */ + struct workqueue_struct *wq; + + /* Lock for rules list */ + rwlock_t lock; +}; + +struct cas_cls_condition_handler; + +/* cas_cls_condition represents single test (e.g. file_size <= 4K) plus + * logical operator (and/or) to combine evaluation of this condition with + * previous conditions within one rule */ +struct cas_cls_condition { + /* Condition handler */ + struct cas_cls_condition_handler *handler; + + /* Conditions list element */ + struct list_head list; + + /* Data specific to this condition instance */ + void *context; + + /* Logical operator to apply to previous conditions evaluation */ + int l_op; +}; + +/* Helper structure aggregating I/O data often accessed by condition handlers */ +struct cas_cls_io { + /* bio */ + struct bio *bio; + + /* First page associated with bio */ + struct page *page; + + /* Inode associated with page */ + struct inode *inode; +}; + +/* Condition evaluation return flags */ +typedef struct cas_cls_eval { + uint8_t yes : 1; + uint8_t stop : 1; +} cas_cls_eval_t; + +static const cas_cls_eval_t cas_cls_eval_yes = { .yes = 1 }; +static const cas_cls_eval_t cas_cls_eval_no = { }; + +/* Logical operators */ +enum cas_cls_logical_op { + cas_cls_logical_and = 0, + cas_cls_logical_or +}; + +/* Condition handler - abstraction over different kinds of condition checks + * (e.g. file size, metadata). Does not contain all the data required to + * evaluate condition (e.g. actual file size value), these are stored in + * @context member of cas_cls_condition object, provided as input argument to + * test, ctr and dtr callbacks. 
*/ +struct cas_cls_condition_handler { + /* String representing this condition class */ + const char *token; + + /* Condition test */ + cas_cls_eval_t (*test)(struct cas_classifier *cls, + struct cas_cls_condition *c, struct cas_cls_io *io, + ocf_part_id_t part_id); + + /* Condition constructor */ + int (*ctr)(struct cas_classifier *cls, struct cas_cls_condition *c, + char *data); + + /* Condition destructor */ + void (*dtr)(struct cas_classifier *cls, struct cas_cls_condition *c); +}; + +/* Numeric condition numeric operators */ +enum cas_cls_numeric_op { + cas_cls_numeric_eq = 0, + cas_cls_numeric_ne = 1, + cas_cls_numeric_lt = 2, + cas_cls_numeric_gt = 3, + cas_cls_numeric_le = 4, + cas_cls_numeric_ge = 5, +}; + +/* Numeric condition context */ +struct cas_cls_numeric { + /* Arithmetic operator */ + enum cas_cls_numeric_op operator; + + /* Condition operand as unsigned int */ + uint64_t v_u64; +}; + +/* Directory condition context */ +struct cas_cls_directory { + /* 1 if directory had been resolved */ + int resolved; + + /* Dir path */ + char *pathname; + + /* Resolved inode */ + unsigned long i_ino; + + /* Back pointer to classifier context */ + struct cas_classifier *cls; + + /* Work item associated with resolving dir for this condition */ + struct delayed_work d_work; +}; + +#endif diff --git a/modules/cas_cache/context.c b/modules/cas_cache/context.c new file mode 100644 index 000000000..fdd66fbb2 --- /dev/null +++ b/modules/cas_cache/context.c @@ -0,0 +1,482 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include "cas_cache.h" +#include "context.h" +#include "utils/utils_rpool.h" +#include "utils/utils_data.h" +#include "utils/utils_gc.h" +#include "threads.h" + +struct ocf_mpool *cas_bvec_pool; + +struct cas_reserve_pool *cas_bvec_pages_rpool; + +#define CAS_ALLOC_PAGE_LIMIT 1024 +#define PG_cas PG_private + +#define CAS_LOG_RATELIMIT HZ * 5 +/* High burst limit to ensure cache init logs are printed 
properly */ +#define CAS_LOG_BURST_LIMIT 50 + +static inline void _cas_page_set_priv(struct page *page) +{ + set_bit(PG_cas , &page->flags); +} + +static inline void _cas_page_clear_priv(struct page *page) +{ + clear_bit(PG_cas , &page->flags); + page->private = 0; +} + +static inline int _cas_page_test_priv(struct page *page) +{ + return test_bit(PG_cas , &page->flags); +} + +static void _cas_free_page_rpool(void *allocator_ctx, void *item) +{ + struct page *page = virt_to_page(item); + + _cas_page_clear_priv(page); + __free_page(page); +} + +static void _cas_page_set_cpu(struct page *page, int cpu) +{ + page->private = cpu; +} + +void *_cas_alloc_page_rpool(void *allocator_ctx, int cpu) +{ + struct page *page; + + page = alloc_page(GFP_NOIO | __GFP_NORETRY); + if (!page) + return NULL; + + if (_cas_page_test_priv(page)) { + printk(KERN_WARNING "CAS private bit is set\n"); + WARN(true, OCF_PREFIX_SHORT" CAS private bit is set\n"); + } + + _cas_page_set_priv(page); + _cas_page_set_cpu(page, cpu); + return page_address(page); +} + +static int _cas_page_get_cpu(struct page *page) +{ + return page->private; +} + +/* *** CONTEXT DATA OPERATIONS *** */ + +/* + * + */ +ctx_data_t *__cas_ctx_data_alloc(uint32_t pages, bool zalloc) +{ + struct blk_data *data; + uint32_t i; + void *page_addr = NULL; + struct page *page = NULL; + int cpu; + + data = ocf_mpool_new(cas_bvec_pool, pages); + + if (!data) { + CAS_PRINT_RL(KERN_ERR "Couldn't allocate BIO vector.\n"); + return NULL; + } + + data->size = pages; + + for (i = 0; i < pages; ++i) { + page_addr = cas_rpool_try_get(cas_bvec_pages_rpool, &cpu); + if (page_addr) { + data->vec[i].bv_page = virt_to_page(page_addr); + _cas_page_set_cpu(data->vec[i].bv_page, cpu); + } else { + data->vec[i].bv_page = alloc_page(GFP_NOIO); + } + + if (!data->vec[i].bv_page) + break; + + if (zalloc) { + if (!page_addr) { + page_addr = page_address( + data->vec[i].bv_page); + } + memset(page_addr, 0, PAGE_SIZE); + } + + data->vec[i].bv_len = 
PAGE_SIZE; + data->vec[i].bv_offset = 0; + } + + /* One of allocations failed */ + if (i != pages) { + for (pages = 0; pages < i; pages++) { + page = data->vec[i].bv_page; + + if (page && !(_cas_page_test_priv(page) && + !cas_rpool_try_put(cas_bvec_pages_rpool, + page_address(page), + _cas_page_get_cpu(page)))) { + __free_page(page); + } + } + + ocf_mpool_del(cas_bvec_pool, data, pages); + data = NULL; + } else { + /* Initialize iterator */ + cas_io_iter_init(&data->iter, data->vec, data->size); + } + + return data; +} + +ctx_data_t *cas_ctx_data_alloc(uint32_t pages) +{ + return __cas_ctx_data_alloc(pages, false); +} + +ctx_data_t *cas_ctx_data_zalloc(uint32_t pages) +{ + return __cas_ctx_data_alloc(pages, true); +} + +/* + * + */ +void cas_ctx_data_free(ctx_data_t *ctx_data) +{ + uint32_t i; + struct page *page = NULL; + struct blk_data *data = ctx_data; + + if (!data) + return; + + for (i = 0; i < data->size; i++) { + page = data->vec[i].bv_page; + + if (!(_cas_page_test_priv(page) && !cas_rpool_try_put( + cas_bvec_pages_rpool, + page_address(page), + _cas_page_get_cpu(page)))) + __free_page(page); + } + + ocf_mpool_del(cas_bvec_pool, data, data->size); +} + +static int _cas_ctx_data_mlock(ctx_data_t *ctx_data) +{ + return 0; +} + +static void _cas_ctx_data_munlock(ctx_data_t *ctx_data) +{ +} + +void cas_ctx_data_secure_erase(ctx_data_t *ctx_data) +{ + struct blk_data *data = ctx_data; + uint32_t i; + void *ptr; + + for (i = 0; i < data->size; i++) { + ptr = page_address(data->vec[i].bv_page); + memset(ptr, 0, PAGE_SIZE); + } +} + +/* + * + */ +static uint32_t _cas_ctx_read_data(void *dst, ctx_data_t *src, + uint32_t size) +{ + struct blk_data *data = src; + + return cas_io_iter_cpy_to_data(dst, &data->iter, size); +} + +/* + * + */ +static uint32_t _cas_ctx_write_data(ctx_data_t *dst, const void *src, + uint32_t size) +{ + struct blk_data *data = dst; + + return cas_io_iter_cpy_from_data(&data->iter, src, size); +} + +/* + * + */ +static uint32_t 
_cas_ctx_zero_data(ctx_data_t *dst, uint32_t size) +{ + struct blk_data *data = dst; + + return cas_io_iter_zero(&data->iter, size); +} + +/* + * + */ +static uint32_t _cas_ctx_seek_data(ctx_data_t *dst, + ctx_data_seek_t seek, uint32_t offset) +{ + struct blk_data *data = dst; + + switch (seek) { + case ctx_data_seek_begin: + cas_io_iter_init(&data->iter, data->vec, data->size); + + case ctx_data_seek_current: + /* TODO Implement this if needed or remove this from enum */ + break; + + default: + BUG(); + return 0; + } + + return cas_io_iter_move(&data->iter, offset); +} + +/* + * + */ +static uint64_t _cas_ctx_data_copy(ctx_data_t *dst, ctx_data_t *src, + uint64_t to, uint64_t from, uint64_t bytes) +{ + struct blk_data *src_data = src, *dst_data = dst; + + return cas_data_cpy(dst_data->vec, dst_data->size, src_data->vec, + src_data->size, to, from, bytes); +} + +static int _cas_ctx_cleaner_init(ocf_cleaner_t c) +{ + return cas_create_cleaner_thread(c); +} + +static void _cas_ctx_cleaner_stop(ocf_cleaner_t c) +{ + return cas_stop_cleaner_thread(c); +} + +static int _cas_ctx_metadata_updater_init(ocf_metadata_updater_t mu) +{ + return cas_create_metadata_updater_thread(mu); +} + +static void _cas_ctx_metadata_updater_kick(ocf_metadata_updater_t mu) +{ + return cas_kick_metadata_updater_thread(mu); +} + +static void _cas_ctx_metadata_updater_stop(ocf_metadata_updater_t mu) +{ + return cas_stop_metadata_updater_thread(mu); +} + +/* + * + */ +static int _cas_ctx_logger_printf(ocf_logger_t logger, ocf_logger_lvl_t lvl, + const char *fmt, va_list args) +{ + static const char* level[] = { + [log_emerg] = KERN_EMERG, + [log_alert] = KERN_ALERT, + [log_crit] = KERN_CRIT, + [log_err] = KERN_ERR, + [log_warn] = KERN_WARNING, + [log_notice] = KERN_NOTICE, + [log_info] = KERN_INFO, + [log_debug] = KERN_DEBUG, + }; + char *format; + if (((unsigned)lvl) >= sizeof(level)) + return -EINVAL; + + format = kasprintf(GFP_ATOMIC, "%s%s", level[lvl], fmt); + if (!format) + return 
-ENOMEM; + + vprintk(format, args); + + kfree(format); + + return 0; +} + +/* + * + */ +static int _cas_ctx_logger_printf_rl(ocf_logger_t logger, const char *func_name) +{ + static DEFINE_RATELIMIT_STATE(cas_log_rl, CAS_LOG_RATELIMIT, + CAS_LOG_BURST_LIMIT); + + if (!func_name) + return -EINVAL; + + return CAS_RATELIMIT(&cas_log_rl, func_name); +} + +/* + * + */ +static int _cas_ctx_logger_dump_stack(ocf_logger_t logger) +{ + dump_stack(); + + return 0; +} + +static const struct ocf_ctx_config ctx_cfg = { + .name = "CAS Linux Kernel", + .ops = { + .data = { + .alloc = cas_ctx_data_alloc, + .free = cas_ctx_data_free, + .mlock = _cas_ctx_data_mlock, + .munlock = _cas_ctx_data_munlock, + .read = _cas_ctx_read_data, + .write = _cas_ctx_write_data, + .zero = _cas_ctx_zero_data, + .seek = _cas_ctx_seek_data, + .copy = _cas_ctx_data_copy, + .secure_erase = cas_ctx_data_secure_erase, + }, + + .cleaner = { + .init = _cas_ctx_cleaner_init, + .stop = _cas_ctx_cleaner_stop, + }, + + .metadata_updater = { + .init = _cas_ctx_metadata_updater_init, + .kick = _cas_ctx_metadata_updater_kick, + .stop = _cas_ctx_metadata_updater_stop, + }, + + .logger = { + .printf = _cas_ctx_logger_printf, + .printf_rl = _cas_ctx_logger_printf_rl, + .dump_stack = _cas_ctx_logger_dump_stack, + }, + }, +}; + +/* *** CONTEXT INITIALIZATION *** */ + +int cas_initialize_context(void) +{ + struct blk_data data; + int ret; + + ret = ocf_ctx_init(&cas_ctx, &ctx_cfg); + if (ret < 0) + return ret; + + cas_bvec_pool = ocf_mpool_create(NULL, sizeof(data), + sizeof(data.vec[0]), GFP_NOIO, 7, "cas_biovec"); + + if (!cas_bvec_pool) { + printk(KERN_ERR "Cannot create BIO vector memory pool\n"); + ret = -ENOMEM; + goto err_ctx; + } + + cas_bvec_pages_rpool = cas_rpool_create(CAS_ALLOC_PAGE_LIMIT, + NULL, PAGE_SIZE, _cas_alloc_page_rpool, + _cas_free_page_rpool, NULL); + if (!cas_bvec_pages_rpool) { + printk(KERN_ERR "Cannot create reserve pool for " + "BIO vector memory pool\n"); + ret = -ENOMEM; + goto err_mpool; + 
} + + cas_garbage_collector_init(); + + ret = block_dev_init(); + if (ret) { + printk(KERN_ERR "Cannot initialize block device layer\n"); + goto err_rpool; + + } + + ret = atomic_dev_init(); + if (ret) { + printk(KERN_ERR "Cannot initialize atomic device layer\n"); + goto err_block_dev; + } + + ocf_mngt_core_pool_init(cas_ctx); + + return 0; + +err_block_dev: + block_dev_deinit(); +err_rpool: + cas_rpool_destroy(cas_bvec_pages_rpool, _cas_free_page_rpool, NULL); +err_mpool: + ocf_mpool_destroy(cas_bvec_pool); +err_ctx: + ocf_ctx_exit(cas_ctx); + + return ret; +} + +int cas_cleanup_context(void) +{ + ocf_mngt_core_pool_deinit(cas_ctx); + block_dev_deinit(); + atomic_dev_deinit(); + cas_garbage_collector_deinit(); + ocf_mpool_destroy(cas_bvec_pool); + cas_rpool_destroy(cas_bvec_pages_rpool, _cas_free_page_rpool, NULL); + + return ocf_ctx_exit(cas_ctx); +} + +/* *** CONTEXT DATA HELPER FUNCTION *** */ + +/* + * + */ +struct blk_data *cas_alloc_blk_data(uint32_t size, gfp_t flags) +{ + struct blk_data *data = ocf_mpool_new_f(cas_bvec_pool, size, flags); + + if (data) + data->size = size; + + return data; +} + +/* + * + */ +void cas_free_blk_data(struct blk_data *data) +{ + if (!data) + return; + + ocf_mpool_del(cas_bvec_pool, data, data->size); +} + diff --git a/modules/cas_cache/context.h b/modules/cas_cache/context.h new file mode 100644 index 000000000..a8dc588f3 --- /dev/null +++ b/modules/cas_cache/context.h @@ -0,0 +1,79 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + + +#ifndef __CONTEXT_H__ +#define __CONTEXT_H__ + +#include "linux_kernel_version.h" + +struct bio_vec_iter { + struct bio_vec *vec; + uint32_t vec_size; + uint32_t idx; + uint32_t offset; + uint32_t len; + struct bio_vec *ivec; +}; + +struct blk_data { + /** + * @brief Atomic counter for core device + */ + atomic_t master_remaining; + + /** + * @brief Core device request context (core private info) + */ + void *master_io_req; + + /** + * 
@brief CAS IO with which data is associated + */ + struct ocf_io *io; + + /** + * @brief List item used for IO splitting + */ + struct list_head list; + + /** + * @brief Timestamp of start processing request + */ + unsigned long long start_time; + + /** + * @brief Request data siz + */ + uint32_t size; + + /** + * @brief This filed indicates an error for request + */ + int error; + + /** + * @brief Iterator for accessing data + */ + struct bio_vec_iter iter; + + /** + * @brief Request data + */ + struct bio_vec vec[]; +}; + +struct blk_data *cas_alloc_blk_data(uint32_t size, gfp_t flags); +void cas_free_blk_data(struct blk_data *data); + +ctx_data_t *cas_ctx_data_alloc(uint32_t pages); +ctx_data_t *cas_ctx_data_zalloc(uint32_t pages); +void cas_ctx_data_free(ctx_data_t *ctx_data); +void cas_ctx_data_secure_erase(ctx_data_t *ctx_data); + +int cas_initialize_context(void); +int cas_cleanup_context(void); + +#endif /* __CONTEXT_H__ */ diff --git a/modules/cas_cache/control.c b/modules/cas_cache/control.c new file mode 100644 index 000000000..502805093 --- /dev/null +++ b/modules/cas_cache/control.c @@ -0,0 +1,80 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ +#include +#include +#include "linux_kernel_version.h" +#include "service_ui_ioctl.h" +#include "control.h" +#include "cas_cache/cas_cache.h" + +struct cas_ctrl_device { + struct cdev cdev; + struct class *class; + dev_t dev; +}; + +static struct cas_ctrl_device _control_device; + +static const struct file_operations _ctrl_dev_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = cas_service_ioctl_ctrl +}; + +int __init cas_ctrl_device_init(void) +{ + struct cas_ctrl_device *ctrl = &_control_device; + struct device *device; + int result = 0; + + result = alloc_chrdev_region(&ctrl->dev, 0, 1, "cas"); + if (result) { + printk(KERN_ERR "Cannot allocate control chrdev number.\n"); + goto error_alloc_chrdev_region; + } + + cdev_init(&ctrl->cdev, &_ctrl_dev_fops); + 
+ result = cdev_add(&ctrl->cdev, ctrl->dev, 1); + if (result) { + printk(KERN_ERR "Cannot add control chrdev.\n"); + goto error_cdev_add; + } + + ctrl->class = class_create(THIS_MODULE, "cas"); + if (IS_ERR(ctrl->class)) { + printk(KERN_ERR "Cannot create control chrdev class.\n"); + result = PTR_ERR(ctrl->class); + goto error_class_create; + } + + device = device_create(ctrl->class, NULL, ctrl->dev, NULL, + "cas_ctrl"); + if (IS_ERR(device)) { + printk(KERN_ERR "Cannot create control chrdev.\n"); + result = PTR_ERR(device); + goto error_device_create; + } + + return result; + +error_device_create: + class_destroy(ctrl->class); +error_class_create: + cdev_del(&ctrl->cdev); +error_cdev_add: + unregister_chrdev_region(ctrl->dev, 1); +error_alloc_chrdev_region: + return result; +} + +void __exit cas_ctrl_device_deinit(void) +{ + struct cas_ctrl_device *ctrl = &_control_device; + + device_destroy(ctrl->class, ctrl->dev); + class_destroy(ctrl->class); + cdev_del(&ctrl->cdev); + unregister_chrdev_region(ctrl->dev, 1); +} diff --git a/modules/cas_cache/control.h b/modules/cas_cache/control.h new file mode 100644 index 000000000..a2b978d40 --- /dev/null +++ b/modules/cas_cache/control.h @@ -0,0 +1,11 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ +#ifndef __CAS_CONTROL_H__ +#define __CAS_CONTROL_H__ + +int __init cas_ctrl_device_init(void); +void __exit cas_ctrl_device_deinit(void); + +#endif diff --git a/modules/cas_cache/layer_cache_management.c b/modules/cas_cache/layer_cache_management.c new file mode 100644 index 000000000..f6ecc9142 --- /dev/null +++ b/modules/cas_cache/layer_cache_management.c @@ -0,0 +1,1863 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include "cas_cache.h" +#include "utils/utils_blk.h" +#include "threads.h" + +extern u32 max_writeback_queue_size; +extern u32 writeback_queue_unblock_size; +extern u32 metadata_layout; +extern u32 
unaligned_io; +extern u32 seq_cut_off_mb; +extern u32 use_io_scheduler; + +struct _cache_mng_sync_context { + struct completion compl; + int *result; +}; + +static void _cache_mng_save_sync_complete(ocf_cache_t cache, void *priv, + int error) +{ + struct _cache_mng_sync_context *context = priv; + + *context->result = error; + complete(&context->compl); +} + +static int _cache_mng_save_sync(ocf_cache_t cache) +{ + struct _cache_mng_sync_context context; + int result; + + init_completion(&context.compl); + context.result = &result; + + ocf_mngt_cache_save(cache, _cache_mng_save_sync_complete, &context); + wait_for_completion(&context.compl); + + return result; +} + +static void _cache_mng_cache_flush_complete(ocf_cache_t cache, void *priv, + int error) +{ + struct _cache_mng_sync_context *context = priv; + + *context->result = error; + complete(&context->compl); +} + +static int _cache_mng_cache_flush_sync(ocf_cache_t cache, bool interruption) +{ + struct _cache_mng_sync_context context; + int result; + + init_completion(&context.compl); + context.result = &result; + + ocf_mngt_cache_flush(cache, interruption, + _cache_mng_cache_flush_complete, &context); + wait_for_completion(&context.compl); + + return result; +} + +static void _cache_mng_core_flush_complete(ocf_core_t core, void *priv, + int error) +{ + struct _cache_mng_sync_context *context = priv; + + *context->result = error; + complete(&context->compl); +} + +static int _cache_mng_core_flush_sync(ocf_core_t core, bool interruption) +{ + struct _cache_mng_sync_context context; + int result; + + init_completion(&context.compl); + context.result = &result; + + ocf_mngt_core_flush(core, interruption, + _cache_mng_core_flush_complete, &context); + wait_for_completion(&context.compl); + + return result; +} + +static void _cache_mng_cache_stop_complete(ocf_cache_t cache, void *priv, + int error) +{ + struct _cache_mng_sync_context *context = priv; + + *context->result = error; + complete(&context->compl); +} + 
+static int _cache_mng_cache_stop_sync(ocf_cache_t cache) +{ + struct _cache_mng_sync_context context; + int result; + + init_completion(&context.compl); + context.result = &result; + + ocf_mngt_cache_stop(cache, _cache_mng_cache_stop_complete, &context); + wait_for_completion(&context.compl); + + return result; +} + +int cache_mng_flush_object(ocf_cache_id_t cache_id, ocf_core_id_t core_id) +{ + ocf_cache_t cache; + ocf_core_t core; + int result; + + result = ocf_mngt_cache_get_by_id(cas_ctx, cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + result = ocf_core_get(cache, core_id, &core); + if (result) + goto out; + + result = _cache_mng_core_flush_sync(core, true); + +out: + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +int cache_mng_flush_device(ocf_cache_id_t id) +{ + int result; + ocf_cache_t cache; + + result = ocf_mngt_cache_get_by_id(cas_ctx, id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + result = _cache_mng_cache_flush_sync(cache, true); + + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +int cache_mng_set_cleaning_policy(ocf_cache_id_t cache_id, uint32_t type) +{ + ocf_cache_t cache; + int result; + + result = ocf_mngt_cache_get_by_id(cas_ctx, cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + result = ocf_mngt_cache_cleaning_set_policy(cache, type); + if (result) + goto out; + + result = _cache_mng_save_sync(cache); + +out: + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +int cache_mng_get_cleaning_policy(ocf_cache_id_t cache_id, uint32_t *type) +{ + ocf_cleaning_t tmp_type; + ocf_cache_t cache; + int result; + + result = 
ocf_mngt_cache_get_by_id(cas_ctx, cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_read_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + result = ocf_mngt_cache_cleaning_get_policy(cache, &tmp_type); + + if (result == 0) + *type = tmp_type; + + ocf_mngt_cache_read_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +int cache_mng_set_cleaning_param(ocf_cache_id_t cache_id, ocf_cleaning_t type, + uint32_t param_id, uint32_t param_value) +{ + ocf_cache_t cache; + int result; + + result = ocf_mngt_cache_get_by_id(cas_ctx, cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + result = ocf_mngt_cache_cleaning_set_param(cache, type, + param_id, param_value); + if (result) + goto out; + + result = _cache_mng_save_sync(cache); + +out: + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +int cache_mng_get_cleaning_param(ocf_cache_id_t cache_id, ocf_cleaning_t type, + uint32_t param_id, uint32_t *param_value) +{ + ocf_cache_t cache; + int result; + + result = ocf_mngt_cache_get_by_id(cas_ctx, cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_read_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + result = ocf_mngt_cache_cleaning_get_param(cache, type, + param_id, param_value); + + ocf_mngt_cache_read_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +struct get_paths_ctx { + char *core_path_name_tab; + int max_count; + int position; +}; + +int _cache_mng_core_pool_get_paths_visitor(ocf_uuid_t uuid, void *ctx) +{ + struct get_paths_ctx *visitor_ctx = ctx; + + if (visitor_ctx->position >= visitor_ctx->max_count) + return 0; + + if (copy_to_user((void __user *)visitor_ctx->core_path_name_tab + + (visitor_ctx->position * MAX_STR_LEN), + uuid->data, uuid->size)) { + return -ENODATA; + } + + 
visitor_ctx->position++; + + return 0; +} + +int cache_mng_core_pool_get_paths(struct kcas_core_pool_path *cmd_info) +{ + struct get_paths_ctx visitor_ctx = {0}; + int result; + + visitor_ctx.core_path_name_tab = cmd_info->core_path_tab; + visitor_ctx.max_count = cmd_info->core_pool_count; + + result = ocf_mngt_core_pool_visit(cas_ctx, + _cache_mng_core_pool_get_paths_visitor, + &visitor_ctx); + + cmd_info->core_pool_count = visitor_ctx.position; + return result; +} + +int cache_mng_core_pool_remove(struct kcas_core_pool_remove *cmd_info) +{ + struct ocf_volume_uuid uuid; + ocf_volume_t vol; + + uuid.data = cmd_info->core_path_name; + uuid.size = strnlen(cmd_info->core_path_name, MAX_STR_LEN); + + vol = ocf_mngt_core_pool_lookup(cas_ctx, &uuid, + ocf_ctx_get_volume_type(cas_ctx, + BLOCK_DEVICE_VOLUME)); + if (!vol) + return -OCF_ERR_CORE_NOT_AVAIL; + + ocf_volume_close(vol); + ocf_mngt_core_pool_remove(cas_ctx, vol); + + return 0; +} + +struct cache_mng_metadata_probe_context { + struct completion compl; + struct kcas_cache_check_device *cmd_info; + int *result; +}; + +static void cache_mng_metadata_probe_end(void *priv, int error, + struct ocf_metadata_probe_status *status) +{ + struct cache_mng_metadata_probe_context *context = priv; + struct kcas_cache_check_device *cmd_info = context->cmd_info; + + *context->result = error; + + if (error == -ENODATA || error == -EBADF) { + cmd_info->is_cache_device = false; + *context->result = 0; + } else if (error == 0) { + cmd_info->is_cache_device = true; + cmd_info->clean_shutdown = status->clean_shutdown; + cmd_info->cache_dirty = status->cache_dirty; + } + + complete(&context->compl); +} + +int cache_mng_cache_check_device(struct kcas_cache_check_device *cmd_info) +{ + struct cache_mng_metadata_probe_context context; + struct block_device *bdev; + ocf_volume_t volume; + char holder[] = "CAS CHECK CACHE DEVICE\n"; + int result; + + bdev = OPEN_BDEV_EXCLUSIVE(cmd_info->path_name, FMODE_READ, holder); + if (IS_ERR(bdev)) { 
+ return (PTR_ERR(bdev) == -EBUSY) ? + -OCF_ERR_NOT_OPEN_EXC : + -OCF_ERR_INVAL_VOLUME_TYPE; + } + + result = cas_blk_open_volume_by_bdev(&volume, bdev); + if (result) + goto out_bdev; + + cmd_info->format_atomic = (ocf_ctx_get_volume_type_id(cas_ctx, + ocf_volume_get_type(volume)) == ATOMIC_DEVICE_VOLUME); + + init_completion(&context.compl); + context.cmd_info = cmd_info; + context.result = &result; + + ocf_metadata_probe(cas_ctx, volume, cache_mng_metadata_probe_end, + &context); + wait_for_completion(&context.compl); + + cas_blk_close_volume(volume); +out_bdev: + CLOSE_BDEV_EXCLUSIVE(bdev, FMODE_READ); + return result; +} + +int cache_mng_prepare_core_cfg(struct ocf_mngt_core_config *cfg, + struct kcas_insert_core *cmd_info) +{ + int result; + + if (strnlen(cmd_info->core_path_name, MAX_STR_LEN) >= MAX_STR_LEN) + return -OCF_ERR_INVAL; + + memset(cfg, 0, sizeof(*cfg)); + cfg->uuid.data = cmd_info->core_path_name; + cfg->uuid.size = strnlen(cmd_info->core_path_name, MAX_STR_LEN) + 1; + cfg->core_id = cmd_info->core_id; + cfg->cache_id = cmd_info->cache_id; + cfg->try_add = cmd_info->try_add; + + if (cas_upgrade_is_in_upgrade()) { + cfg->volume_type = BLOCK_DEVICE_VOLUME; + return 0; + } + + if (cmd_info->update_path) + return 0; + + result = cas_blk_identify_type(cfg->uuid.data, &cfg->volume_type); + if (!result && cfg->volume_type == ATOMIC_DEVICE_VOLUME) + result = -KCAS_ERR_NVME_BAD_FORMAT; + if (OCF_ERR_NOT_OPEN_EXC == abs(result)) { + printk(KERN_WARNING OCF_PREFIX_SHORT + "Cannot open device %s exclusively. 
" + "It is already opened by another program!\n", + cmd_info->core_path_name); + } + + return result; +} + +int cache_mng_update_core_uuid(ocf_cache_t cache, ocf_core_id_t id, ocf_uuid_t uuid) +{ + ocf_core_t core; + ocf_volume_t vol; + struct block_device *bdev; + struct bd_object *bdvol; + bool match; + int result; + + if (ocf_core_get(cache, id, &core)) { + /* no such core */ + return -ENODEV; + } + + if (ocf_core_get_state(core) != ocf_core_state_active) { + /* core inactive */ + return -ENODEV; + } + + /* get bottom device volume for this core */ + vol = ocf_core_get_volume(core); + bdvol = bd_object(vol); + + /* lookup block device object for device pointed by uuid */ + bdev = LOOKUP_BDEV(uuid->data); + if (IS_ERR(bdev)) { + printk(KERN_ERR "failed to lookup bdev%s\n", (char*)uuid->data); + return -ENODEV; + } + + /* check whether both core id and uuid point to the same block device */ + match = (bdvol->btm_bd == bdev); + + bdput(bdev); + + if (!match) { + printk(KERN_ERR "UUID provided does not match target core device\n"); + return -ENODEV; + } + + result = ocf_mngt_core_set_uuid(core, uuid); + if (result) + return result; + + return _cache_mng_save_sync(cache); +} + +static void _cache_mng_log_core_device_path(ocf_core_t core) +{ + ocf_cache_t cache = ocf_core_get_cache(core); + const ocf_uuid_t core_uuid = (const ocf_uuid_t)ocf_core_get_uuid(core); + + printk(KERN_INFO OCF_PREFIX_SHORT "Adding device %s as core %s " + "to cache %s\n", (const char*)core_uuid->data, + ocf_core_get_name(core), ocf_cache_get_name(cache)); +} + +static int _cache_mng_log_core_device_path_visitor(ocf_core_t core, void *cntx) +{ + _cache_mng_log_core_device_path(core); + + return 0; +} + +struct _cache_mng_add_core_context { + struct completion compl; + ocf_core_t *core; + int *result; +}; + +/************************************************************ + * Function for adding a CORE object to the cache instance. 
* + ************************************************************/ + +static void _cache_mng_add_core_complete(ocf_cache_t cache, + ocf_core_t core, void *priv, int error) +{ + struct _cache_mng_add_core_context *context = priv; + + *context->core = core; + *context->result = error; + complete(&context->compl); +} + +static void _cache_mng_remove_core_complete(void *priv, int error); + +int cache_mng_add_core_to_cache(struct ocf_mngt_core_config *cfg, + struct kcas_insert_core *cmd_info) +{ + struct _cache_mng_add_core_context add_context; + struct _cache_mng_sync_context remove_context; + ocf_cache_t cache; + ocf_core_t core; + ocf_core_id_t core_id; + int result; + + result = ocf_mngt_cache_get_by_id(cas_ctx, cfg->cache_id, &cache); + if (cfg->try_add && (result == -OCF_ERR_CACHE_NOT_EXIST)) { + result = ocf_mngt_core_pool_add(cas_ctx, &cfg->uuid, + cfg->volume_type); + if (result) { + cmd_info->ext_err_code = + -OCF_ERR_CANNOT_ADD_CORE_TO_POOL; + printk(KERN_ERR OCF_PREFIX_SHORT + "Error occurred during" + " adding core to detached core pool\n"); + } else { + printk(KERN_INFO OCF_PREFIX_SHORT + "Successfully added" + " core to core pool\n"); + } + return result; + } else if (result) { + return result; + } + + result = ocf_mngt_cache_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + if (cmd_info && cmd_info->update_path) { + result = cache_mng_update_core_uuid(cache, cfg->core_id, &cfg->uuid); + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + return result; + } + + cfg->seq_cutoff_threshold = seq_cut_off_mb * MiB; + + init_completion(&add_context.compl); + add_context.core = &core; + add_context.result = &result; + + ocf_mngt_cache_add_core(cache, cfg, _cache_mng_add_core_complete, + &add_context); + wait_for_completion(&add_context.compl); + if (result) + goto error_affter_lock; + + core_id = ocf_core_get_id(core); + + result = block_dev_create_exported_object(core); + if (result) + goto error_after_add_core; + + 
result = block_dev_activate_exported_object(core); + if (result) + goto error_after_create_exported_object; + + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + + if (cmd_info) + cmd_info->core_id = core_id; + + _cache_mng_log_core_device_path(core); + + return 0; + +error_after_create_exported_object: + block_dev_destroy_exported_object(core); + +error_after_add_core: + init_completion(&remove_context.compl); + remove_context.result = &result; + ocf_mngt_cache_remove_core(core, _cache_mng_remove_core_complete, + &remove_context); + wait_for_completion(&remove_context.compl); + +error_affter_lock: + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + + return result; +} + +/* Flush cache and destroy exported object */ +int _cache_mng_remove_core_prepare(ocf_cache_t cache, ocf_core_t core, + struct kcas_remove_core *cmd, bool destroy) +{ + int result = 0; + int flush_result = 0; + bool core_active; + bool flush_interruptible = !destroy; + + core_active = (ocf_core_get_state(core) == ocf_core_state_active); + + if (cmd->detach && !core_active) { + printk(KERN_WARNING OCF_PREFIX_SHORT + "Cannot detach core which " + "is already inactive!\n"); + return -OCF_ERR_CORE_IN_INACTIVE_STATE; + } + + if (core_active && destroy) { + result = block_dev_destroy_exported_object(core); + if (result) + return result; + } + + if (!cmd->force_no_flush) { + if (core_active) { + /* Flush core */ + flush_result = _cache_mng_core_flush_sync(core, + flush_interruptible); + } else { + printk(KERN_WARNING OCF_PREFIX_SHORT + "Cannot remove inactive core " + "without force option\n"); + return -OCF_ERR_CORE_IN_INACTIVE_STATE; + } + } + + if (flush_result) + result = destroy ? 
-KCAS_ERR_REMOVED_DIRTY : flush_result; + + return result; +} + +/**************************************************************** + * Function for removing a CORE object from the cache instance + ****************************************************************/ + +static void _cache_mng_remove_core_complete(void *priv, int error) +{ + struct _cache_mng_sync_context *context = priv; + + *context->result = error; + complete(&context->compl); +} + +int cache_mng_remove_core_from_cache(struct kcas_remove_core *cmd) +{ + struct _cache_mng_sync_context context; + int result, flush_result = 0; + ocf_cache_t cache; + ocf_core_t core; + + result = ocf_mngt_cache_get_by_id(cas_ctx, cmd->cache_id, &cache); + if (result) + return result; + + if (!cmd->force_no_flush) { + /* First check state and flush data (if requested by user) + under read lock */ + result = ocf_mngt_cache_read_lock(cache); + if (result) + goto put; + + result = ocf_core_get(cache, cmd->core_id, &core); + if (result < 0) + goto rd_unlock; + + result = _cache_mng_remove_core_prepare(cache, core, cmd, + false); + if (result) + goto rd_unlock; + + ocf_mngt_cache_read_unlock(cache); + } + + /* Acquire write lock */ + result = ocf_mngt_cache_lock(cache); + if (result) + goto put; + + result = ocf_core_get(cache, cmd->core_id, &core); + if (result < 0) { + goto unlock; + } + + /* + * Destroy exported object and flush core again but don't allow for + * interruption - in case of flush error after exported object had been + * destroyed, instead of trying rolling this back we rather detach core + * and then inform user about error. 
+ */ + result = _cache_mng_remove_core_prepare(cache, core, cmd, true); + if (result == -KCAS_ERR_REMOVED_DIRTY) { + flush_result = result; + result = 0; + } else if (result) { + goto unlock; + } + + init_completion(&context.compl); + context.result = &result; + + if (cmd->detach || flush_result) { + ocf_mngt_cache_detach_core(core, + _cache_mng_remove_core_complete, &context); + } else { + ocf_mngt_cache_remove_core(core, + _cache_mng_remove_core_complete, &context); + } + + wait_for_completion(&context.compl); + + if (!result && flush_result) + result = flush_result; + +unlock: + ocf_mngt_cache_unlock(cache); +put: + ocf_mngt_cache_put(cache); + return result; + +rd_unlock: + ocf_mngt_cache_read_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +int cache_mng_reset_core_stats(ocf_cache_id_t cache_id, + ocf_core_id_t core_id) +{ + ocf_cache_t cache; + ocf_core_t core; + int result; + + result = ocf_mngt_cache_get_by_id(cas_ctx, cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + result = ocf_core_get(cache, core_id, &core); + if (result) + goto out; + + ocf_core_stats_initialize(core); + +out: + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + return 0; +} + +static inline void io_class_info2cfg(ocf_part_id_t part_id, + struct ocf_io_class_info *info, struct ocf_mngt_io_class_config *cfg) +{ + cfg->class_id = part_id; + cfg->name = info->name; + cfg->prio = info->priority; + cfg->cache_mode = info->cache_mode; + cfg->min_size = info->min_size; + cfg->max_size = info->max_size; +} + +int cache_mng_set_partitions(struct kcas_io_classes *cfg) +{ + ocf_cache_t cache; + struct ocf_mngt_io_classes_config *io_class_cfg; + struct cas_cls_rule *cls_rule[OCF_IO_CLASS_MAX]; + ocf_part_id_t class_id; + int result; + + io_class_cfg = kzalloc(sizeof(struct ocf_mngt_io_class_config) * + OCF_IO_CLASS_MAX, GFP_KERNEL); + if (!io_class_cfg) + 
return -OCF_ERR_NO_MEM; + + for (class_id = 0; class_id < OCF_IO_CLASS_MAX; class_id++) { + io_class_cfg->config[class_id].class_id = class_id; + + if (!cfg->info[class_id].name[0]) { + io_class_cfg->config[class_id].class_id = class_id; + continue; + } + + io_class_info2cfg(class_id, &cfg->info[class_id], + &io_class_cfg->config[class_id]); + } + + result = ocf_mngt_cache_get_by_id(cas_ctx, cfg->cache_id, &cache); + if (result) + goto out_get; + + for (class_id = 0; class_id < OCF_IO_CLASS_MAX; class_id++) { + result = cas_cls_rule_create(cache, class_id, + cfg->info[class_id].name, + &cls_rule[class_id]); + if (result) + goto out_cls; + } + + result = ocf_mngt_cache_lock(cache); + if (result) + goto out_cls; + + result = ocf_mngt_cache_io_classes_configure(cache, io_class_cfg); + if (result == -OCF_ERR_IO_CLASS_NOT_EXIST) + result = 0; + if(result) + goto out_configure; + + result = _cache_mng_save_sync(cache); + if (result) + goto out_configure; + + for (class_id = 0; class_id < OCF_IO_CLASS_MAX; class_id++) + cas_cls_rule_apply(cache, class_id, cls_rule[class_id]); + +out_configure: + ocf_mngt_cache_unlock(cache); +out_cls: + if (result) { + while (class_id--) + cas_cls_rule_destroy(cache, cls_rule[class_id]); + } + ocf_mngt_cache_put(cache); +out_get: + kfree(io_class_cfg); + return result; +} + +static int _cache_mng_create_exported_object(ocf_core_t core, void *cntx) +{ + int result; + ocf_cache_t cache = ocf_core_get_cache(core); + + result = block_dev_create_exported_object(core); + if (result) { + printk(KERN_ERR "Cannot to create exported object, " + "cache id = %u, core id = %u\n", + ocf_cache_get_id(cache), + ocf_core_get_id(core)); + return result; + } + + result = block_dev_activate_exported_object(core); + if (result) { + printk(KERN_ERR "Cannot to activate exported object, " + "cache id = %u, core id = %u\n", + ocf_cache_get_id(cache), + ocf_core_get_id(core)); + } + + return result; +} + +static int _cache_mng_destroy_exported_object(ocf_core_t 
core, void *cntx) +{ + if (block_dev_destroy_exported_object(core)) { + ocf_cache_t cache = ocf_core_get_cache(core); + + printk(KERN_ERR "Cannot to destroy exported object, " + "cache id = %u, core id = %u\n", + ocf_cache_get_id(cache), + ocf_core_get_id(core)); + } + + return 0; +} + +static int cache_mng_initialize_core_objects(ocf_cache_t cache) +{ + int result; + + result = ocf_core_visit(cache, _cache_mng_create_exported_object, NULL, + true); + if (result) { + /* Need to cleanup */ + ocf_core_visit(cache, _cache_mng_destroy_exported_object, NULL, + true); + } + + return result; +} + +int cache_mng_prepare_cache_cfg(struct ocf_mngt_cache_config *cfg, + struct ocf_mngt_cache_device_config *device_cfg, + struct kcas_start_cache *cmd) +{ + int init_cache, result; + struct atomic_dev_params atomic_params = { 0 }; + struct block_device *bdev; + int part_count; + char holder[] = "CAS START\n"; + bool is_part; + + if (strnlen(cmd->cache_path_name, MAX_STR_LEN) >= MAX_STR_LEN) + return -OCF_ERR_INVAL; + + memset(cfg, 0, sizeof(*cfg)); + + cfg->id = cmd->cache_id; + cfg->cache_mode = cmd->caching_mode; + cfg->cache_line_size = cmd->line_size; + cfg->eviction_policy = cmd->eviction_policy; + cfg->cache_line_size = cmd->line_size; + cfg->pt_unaligned_io = !unaligned_io; + cfg->use_submit_io_fast = !use_io_scheduler; + cfg->locked = true; + cfg->metadata_volatile = false; + cfg->metadata_layout = metadata_layout; + + cfg->backfill.max_queue_size = max_writeback_queue_size; + cfg->backfill.queue_unblock_size = writeback_queue_unblock_size; + + device_cfg->uuid.data = cmd->cache_path_name; + device_cfg->uuid.size = strnlen(device_cfg->uuid.data, MAX_STR_LEN) + 1; + device_cfg->cache_line_size = cmd->line_size; + device_cfg->force = cmd->force; + device_cfg->perform_test = true; + device_cfg->discard_on_start = true; + + init_cache = cmd->init_cache; + + switch (init_cache) { + case CACHE_INIT_NEW: + case CACHE_INIT_LOAD: + break; + default: + return -OCF_ERR_INVAL; + } + + 
bdev = OPEN_BDEV_EXCLUSIVE(device_cfg->uuid.data, FMODE_READ, holder); + if (IS_ERR(bdev)) { + return (PTR_ERR(bdev) == -EBUSY) ? + -OCF_ERR_NOT_OPEN_EXC : + -OCF_ERR_INVAL_VOLUME_TYPE; + } + + is_part = (bdev->bd_contains != bdev); + part_count = cas_blk_get_part_count(bdev); + CLOSE_BDEV_EXCLUSIVE(bdev, FMODE_READ); + + if (!is_part && part_count > 1 && !device_cfg->force) + return -KCAS_ERR_CONTAINS_PART; + + result = cas_blk_identify_type_atomic(device_cfg->uuid.data, + &device_cfg->volume_type, &atomic_params); + if (result) + return result; + + cmd->metadata_mode_optimal = + block_dev_is_metadata_mode_optimal(&atomic_params, + device_cfg->volume_type); + + return 0; +} + +static void _cache_mng_log_cache_device_path(ocf_cache_t cache, + struct ocf_mngt_cache_device_config *device_cfg) +{ + printk(KERN_INFO OCF_PREFIX_SHORT "Adding device %s as cache %s\n", + (const char*)device_cfg->uuid.data, + ocf_cache_get_name(cache)); +} + +static void _cas_queue_kick(ocf_queue_t q) +{ + return cas_kick_queue_thread(q); +} + +static void _cas_queue_stop(ocf_queue_t q) +{ + return cas_stop_queue_thread(q); +} + + +const struct ocf_queue_ops queue_ops = { + .kick = _cas_queue_kick, + .stop = _cas_queue_stop, +}; + +static int _cache_mng_start_queues(ocf_cache_t cache) +{ + uint32_t cpus_no = num_online_cpus(); + struct cache_priv *cache_priv; + int result, i; + + cache_priv = ocf_cache_get_priv(cache); + + for (i = 0; i < cpus_no; i++) { + result = ocf_queue_create(cache, &cache_priv->io_queues[i], + &queue_ops); + if (result) + goto err; + + result = cas_create_queue_thread(cache_priv->io_queues[i], i); + if (result) { + ocf_queue_put(cache_priv->io_queues[i]); + goto err; + } + } + + result = ocf_queue_create(cache, &cache_priv->mngt_queue, &queue_ops); + if (result) + goto err; + + result = cas_create_queue_thread(cache_priv->mngt_queue, CAS_CPUS_ALL); + if (result) { + ocf_queue_put(cache_priv->mngt_queue); + goto err; + } + + ocf_mngt_cache_set_mngt_queue(cache, 
cache_priv->mngt_queue); + + return 0; +err: + while (--i >= 0) + ocf_queue_put(cache_priv->io_queues[i]); + + return result; +} + +struct _cache_mng_attach_context { + struct completion compl; + int *result; +}; + +static void _cache_mng_attach_complete(ocf_cache_t cache, void *priv, int error) +{ + struct _cache_mng_attach_context *context = priv; + + *context->result = error; + complete(&context->compl); +} + +static int _cache_mng_start(struct ocf_mngt_cache_config *cfg, + struct ocf_mngt_cache_device_config *device_cfg, + struct kcas_start_cache *cmd, ocf_cache_t *cache) +{ + struct _cache_mng_attach_context context; + struct cache_priv *cache_priv; + uint32_t cpus_no = num_online_cpus(); + ocf_cache_t tmp_cache; + int result; + + result = ocf_mngt_cache_start(cas_ctx, &tmp_cache, cfg); + if (result) + return result; + + cache_priv = vmalloc(sizeof(*cache_priv) + + cpus_no * sizeof(*cache_priv->io_queues)); + if (!cache_priv) { + result = -OCF_ERR_NO_MEM; + goto err_priv; + } + + ocf_cache_set_priv(tmp_cache, cache_priv); + + result = cas_cls_init(tmp_cache); + if (result) + goto err_classifier; + + result = _cache_mng_start_queues(tmp_cache); + if (result) + goto err_queues; + + init_completion(&context.compl); + context.result = &result; + + ocf_mngt_cache_attach(tmp_cache, device_cfg, + _cache_mng_attach_complete, &context); + + wait_for_completion(&context.compl); + if (result) + goto err_attach; + + _cache_mng_log_cache_device_path(tmp_cache, device_cfg); + + *cache = tmp_cache; + + return 0; + +err_attach: + if (result == -OCF_ERR_NO_FREE_RAM && cmd) { + ocf_mngt_get_ram_needed(tmp_cache, device_cfg, + &cmd->min_free_ram); + } +err_queues: + cas_cls_deinit(tmp_cache); +err_classifier: + vfree(cache_priv); +err_priv: + _cache_mng_cache_stop_sync(tmp_cache); + ocf_mngt_cache_unlock(tmp_cache); + return result; +} + +struct _cache_mng_load_context { + struct completion compl; + int *result; +}; + +static void _cache_mng_load_complete(ocf_cache_t cache, void 
*priv, int error) +{ + struct _cache_mng_load_context *context = priv; + + *context->result = error; + complete(&context->compl); +} + +static int _cache_mng_load(struct ocf_mngt_cache_config *cfg, + struct ocf_mngt_cache_device_config *device_cfg, + struct kcas_start_cache *cmd, ocf_cache_t *cache) +{ + struct _cache_mng_load_context context; + struct cache_priv *cache_priv; + uint32_t cpus_no = num_online_cpus(); + ocf_cache_t tmp_cache; + int result; + + result = ocf_mngt_cache_start(cas_ctx, &tmp_cache, cfg); + if (result) + return result; + + + cache_priv = vmalloc(sizeof(*cache_priv) + + cpus_no * sizeof(*cache_priv->io_queues)); + if (!cache_priv) { + result = -OCF_ERR_NO_MEM; + goto err_priv; + } + + ocf_cache_set_priv(tmp_cache, cache_priv); + + result = _cache_mng_start_queues(tmp_cache); + if (result) + goto err_queues; + + init_completion(&context.compl); + context.result = &result; + + ocf_mngt_cache_load(tmp_cache, device_cfg, + _cache_mng_load_complete, &context); + + wait_for_completion(&context.compl); + if (result) + goto err_load; + + _cache_mng_log_cache_device_path(tmp_cache, device_cfg); + + result = cas_cls_init(tmp_cache); + if (result) + goto err_load; + + result = cache_mng_initialize_core_objects(tmp_cache); + if (result) + goto err_core_obj; + + ocf_core_visit(tmp_cache, _cache_mng_log_core_device_path_visitor, + NULL, false); + + *cache = tmp_cache; + + return 0; + +err_core_obj: + cas_cls_deinit(tmp_cache); +err_load: + if (result == -OCF_ERR_NO_FREE_RAM && cmd) { + ocf_mngt_get_ram_needed(tmp_cache, device_cfg, + &cmd->min_free_ram); + } +err_queues: + vfree(cache_priv); +err_priv: + _cache_mng_cache_stop_sync(tmp_cache); + ocf_mngt_cache_unlock(tmp_cache); + return result; +} + +int cache_mng_init_instance(struct ocf_mngt_cache_config *cfg, + struct ocf_mngt_cache_device_config *device_cfg, + struct kcas_start_cache *cmd) +{ + ocf_cache_t cache = NULL; + const char *name; + bool load = (cmd && cmd->init_cache == CACHE_INIT_LOAD); + 
int result; + + if (!try_module_get(THIS_MODULE)) + return -KCAS_ERR_SYSTEM; + + /* Start cache. Returned cache instance will be locked as it was set + * in configuration. + */ + if (!load) + result = _cache_mng_start(cfg, device_cfg, cmd, &cache); + else + result = _cache_mng_load(cfg, device_cfg, cmd, &cache); + + if (result) { + module_put(THIS_MODULE); + return result; + } + + if (cmd) { + ocf_volume_t cache_obj = ocf_cache_get_volume(cache); + struct bd_object *bd_cache_obj = bd_object(cache_obj); + struct block_device *bdev = bd_cache_obj->btm_bd; + + /* If we deal with whole device, reread partitions */ + if (bdev->bd_contains == bdev) + ioctl_by_bdev(bdev, BLKRRPART, (unsigned long)NULL); + + /* Set other back information */ + name = block_dev_get_elevator_name( + casdsk_disk_get_queue(bd_cache_obj->dsk)); + if (name) + strlcpy(cmd->cache_elevator, + name, MAX_ELEVATOR_NAME); + } + + ocf_mngt_cache_unlock(cache); + + return 0; +} + +/** + * @brief routine implementing dynamic sequential cutoff parameter switching + * @param[in] cache_id cache id to which the change pertains + * @param[in] core_id core id to which the change pertains + * or OCF_CORE_ID_INVALID for setting value for all cores + * attached to specified cache + * @param[in] thresh new sequential cutoff threshold value + * @return exit code of successful completion is 0; + * nonzero exit code means failure + */ + +int cache_mng_set_seq_cutoff_threshold(ocf_cache_id_t cache_id, ocf_core_id_t core_id, + uint32_t thresh) +{ + ocf_cache_t cache; + ocf_core_t core; + int result; + + result = ocf_mngt_cache_get_by_id(cas_ctx, cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + if (core_id != OCF_CORE_ID_INVALID) { + result = ocf_core_get(cache, core_id, &core); + if (result) + goto out; + result = ocf_mngt_core_set_seq_cutoff_threshold(core, thresh); + } else { + result = 
ocf_mngt_core_set_seq_cutoff_threshold_all(cache, + thresh); + } + + if (result) + goto out; + + result = _cache_mng_save_sync(cache); + +out: + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +/** + * @brief routine implementing dynamic sequential cutoff parameter switching + * @param[in] id cache id to which the change pertains + * @param[in] core_id core id to which the change pertains + * or OCF_CORE_ID_INVALID for setting value for all cores + * attached to specified cache + * @param[in] policy new sequential cutoff policy value + * @return exit code of successful completion is 0; + * nonzero exit code means failure + */ + +int cache_mng_set_seq_cutoff_policy(ocf_cache_id_t id, ocf_core_id_t core_id, + ocf_seq_cutoff_policy policy) +{ + ocf_cache_t cache; + ocf_core_t core; + int result; + + result = ocf_mngt_cache_get_by_id(cas_ctx, id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + if (core_id != OCF_CORE_ID_INVALID) { + result = ocf_core_get(cache, core_id, &core); + if (result) + goto out; + result = ocf_mngt_core_set_seq_cutoff_policy(core, policy); + } else { + result = ocf_mngt_core_set_seq_cutoff_policy_all(cache, policy); + } + + if (result) + goto out; + + result = _cache_mng_save_sync(cache); + +out: + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +/** + * @brief routine implementing dynamic sequential cutoff parameter switching + * @param[in] cache_id cache id to which the change pertains + * @param[in] core_id core id to which the change pertains + * or OCF_CORE_ID_INVALID for setting value for all cores + * attached to specified cache + * @param[out] thresh new sequential cutoff threshold value + * @return exit code of successful completion is 0; + * nonzero exit code means failure + */ + +int cache_mng_get_seq_cutoff_threshold(ocf_cache_id_t cache_id, + ocf_core_id_t core_id, 
uint32_t *thresh) +{ + ocf_cache_t cache; + ocf_core_t core; + int result; + + result = ocf_mngt_cache_get_by_id(cas_ctx, cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_read_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + result = ocf_core_get(cache, core_id, &core); + if (result) + goto out; + + result = ocf_mngt_core_get_seq_cutoff_threshold(core, thresh); + +out: + ocf_mngt_cache_read_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +/** + * @brief routine implementing dynamic sequential cutoff parameter switching + * @param[in] id cache id to which the change pertains + * @param[in] core_id core id to which the change pertains + * or OCF_CORE_ID_INVALID for setting value for all cores + * attached to specified cache + * @param[out] policy new sequential cutoff policy value + * @return exit code of successful completion is 0; + * nonzero exit code means failure + */ + +int cache_mng_get_seq_cutoff_policy(ocf_cache_id_t id, ocf_core_id_t core_id, + ocf_seq_cutoff_policy *policy) +{ + ocf_cache_t cache; + ocf_core_t core; + int result; + + result = ocf_mngt_cache_get_by_id(cas_ctx, id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_read_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + result = ocf_core_get(cache, core_id, &core); + if (result) + goto out; + + result = ocf_mngt_core_get_seq_cutoff_policy(core, policy); + +out: + ocf_mngt_cache_read_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +/** + * @brief routine implementing dynamic cache mode switching + * @param device caching device to which operation applies + * @param mode target mode (WRITE_THROUGH, WRITE_BACK, WRITE_AROUND etc.) + * @param flush shall we flush dirty data during switch, or shall we flush + * all remaining dirty data before entering new mode? 
+ */ + +int cache_mng_set_cache_mode(ocf_cache_id_t id, ocf_cache_mode_t mode, + uint8_t flush) +{ + ocf_cache_mode_t old_mode; + ocf_cache_t cache; + int result; + + result = ocf_mngt_cache_get_by_id(cas_ctx, id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + old_mode = ocf_cache_get_mode(cache); + + result = ocf_mngt_cache_set_mode(cache, mode); + if (result) + goto out; + + if (flush) { + result = _cache_mng_cache_flush_sync(cache, true); + if (result) { + ocf_mngt_cache_set_mode(cache, old_mode); + goto out; + } + } + + result = _cache_mng_save_sync(cache); + if (result) + ocf_mngt_cache_set_mode(cache, old_mode); + +out: + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +/** + * @brief routine implements --remove-cache command. + * @param[in] device caching device to be removed + * @param[in] flush Boolean: shall we flush dirty data before removing cache. + * if yes, flushing may still be interrupted by user (in which case + * device won't be actually removed and error will be returned) + * @param[in] allow_interruption shall we allow interruption of dirty + * data flushing + */ +int cache_mng_exit_instance(ocf_cache_id_t id, int flush) +{ + ocf_cache_t cache; + struct cache_priv *cache_priv; + int status, flush_status = 0; + + /* Get cache */ + status = ocf_mngt_cache_get_by_id(cas_ctx, id, &cache); + if (status) + return status; + + cache_priv = ocf_cache_get_priv(cache); + + status = ocf_mngt_cache_read_lock(cache); + if (status) + goto put; + /* + * Flush cache. Flushing may take a long time, so we allow user + * to interrupt this operation. Hence we do first flush before + * disabling exported object to avoid restoring it in case + * of interruption. 
That means some new dirty data could appear + * in cache during flush operation which will not be flushed + * this time, so we need to flush cache again after disabling + * exported object. The second flush should be much faster. + */ + if (flush) { + status = _cache_mng_cache_flush_sync(cache, true); + switch (status) { + case -OCF_ERR_CACHE_IN_INCOMPLETE_STATE: + case -OCF_ERR_FLUSHING_INTERRUPTED: + ocf_mngt_cache_read_unlock(cache); + goto put; + default: + flush_status = status; + break; + } + } + + ocf_mngt_cache_read_unlock(cache); + + /* get cache write lock */ + status = ocf_mngt_cache_lock(cache); + if (status) + goto put; + + if (!cas_upgrade_is_in_upgrade()) { + /* If we are not in upgrade - destroy cache devices */ + status = block_dev_destroy_all_exported_objects(cache); + if (status != 0) { + printk(KERN_WARNING + "Failed to remove all cached devices\n"); + goto unlock; + } + } else { + if (flush_status) { + status = flush_status; + goto unlock; + } + /* + * We are being switched to upgrade in flight mode - + * wait for finishing pending core requests + */ + cache_mng_wait_for_rq_finish(cache); + } + + /* Flush cache again. This time we don't allow interruption. 
*/ + if (flush) + flush_status = _cache_mng_cache_flush_sync(cache, false); + + /* Stop cache device */ + status = _cache_mng_cache_stop_sync(cache); + + if (!status && flush_status) + status = -KCAS_ERR_STOPPED_DIRTY; + + module_put(THIS_MODULE); + + cas_cls_deinit(cache); + + ocf_queue_put(cache_priv->mngt_queue); + vfree(cache_priv); + +unlock: + ocf_mngt_cache_unlock(cache); +put: + ocf_mngt_cache_put(cache); + return status; +} + +static int cache_mng_list_caches_visitor(ocf_cache_t cache, void *cntx) +{ + ocf_cache_id_t id = ocf_cache_get_id(cache); + struct kcas_cache_list *list = cntx; + + if (list->id_position >= id) + return 0; + + if (list->in_out_num >= ARRAY_SIZE(list->cache_id_tab)) + return 1; + + list->cache_id_tab[list->in_out_num] = id; + list->in_out_num++; + + return 0; +} + +int cache_mng_list_caches(struct kcas_cache_list *list) +{ + list->in_out_num = 0; + return ocf_mngt_cache_visit(cas_ctx, cache_mng_list_caches_visitor, list); +} + +int cache_mng_interrupt_flushing(ocf_cache_id_t id) +{ + int result; + ocf_cache_t cache; + + result = ocf_mngt_cache_get_by_id(cas_ctx, id, &cache); + if (result) + return result; + + ocf_mngt_cache_flush_interrupt(cache); + + ocf_mngt_cache_put(cache); + + return 0; + +} + +int cache_mng_get_info(struct kcas_cache_info *info) +{ + uint32_t i, j; + int result; + ocf_cache_t cache; + ocf_core_t core; + const struct ocf_volume_uuid *uuid; + + result = ocf_mngt_cache_get_by_id(cas_ctx, info->cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_read_lock(cache); + if (result) + goto put; + + result = ocf_cache_get_info(cache, &info->info); + if (result) + goto unlock; + + if (info->info.attached) { + uuid = ocf_cache_get_uuid(cache); + strlcpy(info->cache_path_name, uuid->data, + min(sizeof(info->cache_path_name), uuid->size)); + + switch (info->info.volume_type) { + case BLOCK_DEVICE_VOLUME: + info->metadata_mode = CAS_METADATA_MODE_NORMAL; + break; + case ATOMIC_DEVICE_VOLUME: + 
info->metadata_mode = CAS_METADATA_MODE_ATOMIC; + break; + default: + info->metadata_mode = CAS_METADATA_MODE_INVALID; + break; + } + } + + /* Collect cores IDs */ + for (i = 0, j = 0; j < info->info.core_count && + i < OCF_CORE_MAX; i++) { + if (ocf_core_get(cache, i, &core)) + continue; + + info->core_id[j] = i; + j++; + } + +unlock: + ocf_mngt_cache_read_unlock(cache); +put: + ocf_mngt_cache_put(cache); + return result; +} + +int cache_mng_get_io_class_info(struct kcas_io_class *part) +{ + int result; + ocf_cache_id_t cache_id = part->cache_id; + ocf_core_id_t core_id = part->core_id; + uint32_t io_class_id = part->class_id; + ocf_cache_t cache; + ocf_core_t core; + + result = ocf_mngt_cache_get_by_id(cas_ctx, cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_read_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + result = ocf_cache_io_class_get_info(cache, io_class_id, &part->info); + if (result) + goto end; + + if (part->get_stats) { + result = ocf_core_get(cache, core_id, &core); + if (result < 0) { + result = OCF_ERR_CORE_NOT_AVAIL; + goto end; + } + + result = ocf_core_io_class_get_stats(core, io_class_id, + &part->stats); + } + +end: + ocf_mngt_cache_read_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +int cache_mng_get_core_info(struct kcas_core_info *info) +{ + ocf_cache_t cache; + ocf_core_t core; + const struct ocf_volume_uuid *uuid; + int result; + + result = ocf_mngt_cache_get_by_id(cas_ctx, info->cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_read_lock(cache); + if(result) + goto put; + + result = ocf_core_get(cache, info->core_id, &core); + if (result < 0) { + result = OCF_ERR_CORE_NOT_AVAIL; + goto unlock; + } + + result = ocf_core_get_stats(core, &info->stats); + if (result) + goto unlock; + + uuid = ocf_core_get_uuid(core); + + strlcpy(info->core_path_name, uuid->data, + min(sizeof(info->core_path_name), uuid->size)); + + info->state = 
ocf_core_get_state(core); + +unlock: + ocf_mngt_cache_read_unlock(cache); +put: + ocf_mngt_cache_put(cache); + return result; +} + +static int cache_mng_wait_for_rq_finish_visitor(ocf_core_t core, void *cntx) +{ + ocf_volume_t obj = ocf_core_get_volume(core); + struct bd_object *bdobj = bd_object(obj); + + while (atomic64_read(&bdobj->pending_rqs)) + io_schedule(); + + return 0; +} + +void cache_mng_wait_for_rq_finish(ocf_cache_t cache) +{ + ocf_core_visit(cache, cache_mng_wait_for_rq_finish_visitor, NULL, true); +} + +int cache_mng_set_core_params(struct kcas_set_core_param *info) +{ + switch (info->param_id) { + case core_param_seq_cutoff_threshold: + return cache_mng_set_seq_cutoff_threshold(info->cache_id, + info->core_id, info->param_value); + case core_param_seq_cutoff_policy: + return cache_mng_set_seq_cutoff_policy(info->cache_id, + info->core_id, info->param_value); + default: + return -EINVAL; + } +} + +int cache_mng_get_core_params(struct kcas_get_core_param *info) +{ + switch (info->param_id) { + case core_param_seq_cutoff_threshold: + return cache_mng_get_seq_cutoff_threshold(info->cache_id, + info->core_id, &info->param_value); + case core_param_seq_cutoff_policy: + return cache_mng_get_seq_cutoff_policy(info->cache_id, + info->core_id, &info->param_value); + default: + return -EINVAL; + } +} + +int cache_mng_set_cache_params(struct kcas_set_cache_param *info) +{ + switch (info->param_id) { + case cache_param_cleaning_policy_type: + return cache_mng_set_cleaning_policy(info->cache_id, + info->param_value); + + case cache_param_cleaning_alru_wake_up_time: + return cache_mng_set_cleaning_param(info->cache_id, + ocf_cleaning_alru, ocf_alru_wake_up_time, + info->param_value); + case cache_param_cleaning_alru_stale_buffer_time: + return cache_mng_set_cleaning_param(info->cache_id, + ocf_cleaning_alru, ocf_alru_stale_buffer_time, + info->param_value); + case cache_param_cleaning_alru_flush_max_buffers: + return cache_mng_set_cleaning_param(info->cache_id, + 
ocf_cleaning_alru, ocf_alru_flush_max_buffers, + info->param_value); + case cache_param_cleaning_alru_activity_threshold: + return cache_mng_set_cleaning_param(info->cache_id, + ocf_cleaning_alru, ocf_alru_activity_threshold, + info->param_value); + + case cache_param_cleaning_acp_wake_up_time: + return cache_mng_set_cleaning_param(info->cache_id, + ocf_cleaning_acp, ocf_acp_wake_up_time, + info->param_value); + case cache_param_cleaning_acp_flush_max_buffers: + return cache_mng_set_cleaning_param(info->cache_id, + ocf_cleaning_acp, ocf_acp_flush_max_buffers, + info->param_value); + default: + return -EINVAL; + } +} + +int cache_mng_get_cache_params(struct kcas_get_cache_param *info) +{ + switch (info->param_id) { + case cache_param_cleaning_policy_type: + return cache_mng_get_cleaning_policy(info->cache_id, + &info->param_value); + + case cache_param_cleaning_alru_wake_up_time: + return cache_mng_get_cleaning_param(info->cache_id, + ocf_cleaning_alru, ocf_alru_wake_up_time, + &info->param_value); + case cache_param_cleaning_alru_stale_buffer_time: + return cache_mng_get_cleaning_param(info->cache_id, + ocf_cleaning_alru, ocf_alru_stale_buffer_time, + &info->param_value); + case cache_param_cleaning_alru_flush_max_buffers: + return cache_mng_get_cleaning_param(info->cache_id, + ocf_cleaning_alru, ocf_alru_flush_max_buffers, + &info->param_value); + case cache_param_cleaning_alru_activity_threshold: + return cache_mng_get_cleaning_param(info->cache_id, + ocf_cleaning_alru, ocf_alru_activity_threshold, + &info->param_value); + + case cache_param_cleaning_acp_wake_up_time: + return cache_mng_get_cleaning_param(info->cache_id, + ocf_cleaning_acp, ocf_acp_wake_up_time, + &info->param_value); + case cache_param_cleaning_acp_flush_max_buffers: + return cache_mng_get_cleaning_param(info->cache_id, + ocf_cleaning_acp, ocf_acp_flush_max_buffers, + &info->param_value); + default: + return -EINVAL; + } +} diff --git a/modules/cas_cache/layer_cache_management.c.orig 
b/modules/cas_cache/layer_cache_management.c.orig new file mode 100644 index 000000000..08a26a42b --- /dev/null +++ b/modules/cas_cache/layer_cache_management.c.orig @@ -0,0 +1,1615 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include "cas_cache.h" +#include "utils/utils_blk.h" +#include "threads.h" + +extern u32 max_writeback_queue_size; +extern u32 writeback_queue_unblock_size; +extern u32 metadata_layout; +extern u32 unaligned_io; +extern u32 seq_cut_off_mb; +extern u32 use_io_scheduler; + +int cache_mng_flush_object(ocf_cache_id_t cache_id, ocf_core_id_t core_id, + bool interruption) +{ + ocf_cache_t cache; + ocf_core_t core; + int result; + + result = ocf_mngt_cache_get(cas_ctx, cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + result = ocf_core_get(cache, core_id, &core); + if (result) + goto out; + + result = ocf_mngt_core_flush(core, interruption); + +out: + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +int cache_mng_flush_device(ocf_cache_id_t id) +{ + int result; + ocf_cache_t cache; + + result = ocf_mngt_cache_get(cas_ctx, id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + result = ocf_mngt_cache_flush(cache, true); + + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +int cache_mng_set_cleaning_policy(ocf_cache_id_t cache_id, uint32_t type) +{ + ocf_cache_t cache; + int result; + + result = ocf_mngt_cache_get(cas_ctx, cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + result = ocf_mngt_cache_cleaning_set_policy(cache, type); + + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + 
+int cache_mng_get_cleaning_policy(ocf_cache_id_t cache_id, uint32_t *type) +{ + ocf_cleaning_t tmp_type; + ocf_cache_t cache; + int result; + + result = ocf_mngt_cache_get(cas_ctx, cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_read_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + result = ocf_mngt_cache_cleaning_get_policy(cache, &tmp_type); + + if (result == 0) + *type = tmp_type; + + ocf_mngt_cache_read_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +int cache_mng_set_cleaning_param(ocf_cache_id_t cache_id, ocf_cleaning_t type, + uint32_t param_id, uint32_t param_value) +{ + ocf_cache_t cache; + int result; + + result = ocf_mngt_cache_get(cas_ctx, cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + result = ocf_mngt_cache_cleaning_set_param(cache, type, + param_id, param_value); + + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +int cache_mng_get_cleaning_param(ocf_cache_id_t cache_id, ocf_cleaning_t type, + uint32_t param_id, uint32_t *param_value) +{ + ocf_cache_t cache; + int result; + + result = ocf_mngt_cache_get(cas_ctx, cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_read_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + result = ocf_mngt_cache_cleaning_get_param(cache, type, + param_id, param_value); + + ocf_mngt_cache_read_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +struct get_paths_ctx { + char *core_path_name_tab; + int max_count; + int position; +}; + +int _cache_mng_core_pool_get_paths_visitor(ocf_uuid_t uuid, void *ctx) +{ + struct get_paths_ctx *visitor_ctx = ctx; + + if (visitor_ctx->position >= visitor_ctx->max_count) + return 0; + + if (copy_to_user((void __user *)visitor_ctx->core_path_name_tab + + (visitor_ctx->position * 
MAX_STR_LEN), + uuid->data, uuid->size)) { + return -ENODATA; + } + + visitor_ctx->position++; + + return 0; +} + +int cache_mng_core_pool_get_paths(struct kcas_core_pool_path *cmd_info) +{ + struct get_paths_ctx visitor_ctx = {0}; + int result; + + visitor_ctx.core_path_name_tab = cmd_info->core_path_tab; + visitor_ctx.max_count = cmd_info->core_pool_count; + + result = ocf_mngt_core_pool_visit(cas_ctx, + _cache_mng_core_pool_get_paths_visitor, + &visitor_ctx); + + cmd_info->core_pool_count = visitor_ctx.position; + return result; +} + +int cache_mng_core_pool_remove(struct kcas_core_pool_remove *cmd_info) +{ + struct ocf_volume_uuid uuid; + ocf_volume_t vol; + + uuid.data = cmd_info->core_path_name; + uuid.size = strnlen(cmd_info->core_path_name, MAX_STR_LEN); + + vol = ocf_mngt_core_pool_lookup(cas_ctx, &uuid, + ocf_ctx_get_volume_type(cas_ctx, + BLOCK_DEVICE_OBJECT)); + if (!vol) + return -OCF_ERR_CORE_NOT_AVAIL; + + ocf_volume_close(vol); + ocf_mngt_core_pool_remove(cas_ctx, vol); + + return 0; +} + +struct cache_mng_metadata_probe_context { + struct completion compl; + struct kcas_cache_check_device *cmd_info; + int *result; +}; + +static void cache_mng_metadata_probe_end(void *priv, int error, + struct ocf_metadata_probe_status *status) +{ + struct cache_mng_metadata_probe_context *context = priv; + struct kcas_cache_check_device *cmd_info = context->cmd_info; + + *context->result = error; + + if (error == -ENODATA || error == -EBADF) { + cmd_info->is_cache_device = false; + context->result = 0; + } else if (error == 0) { + cmd_info->is_cache_device = true; + cmd_info->clean_shutdown = status->clean_shutdown; + cmd_info->cache_dirty = status->cache_dirty; + } + + complete(&context->compl); +} + +int cache_mng_cache_check_device(struct kcas_cache_check_device *cmd_info) +{ + struct cache_mng_metadata_probe_context context; + struct block_device *bdev; + ocf_volume_t volume; + char holder[] = "CAS CHECK CACHE DEVICE\n"; + int result; + + bdev = 
OPEN_BDEV_EXCLUSIVE(cmd_info->path_name, FMODE_READ, holder);
+	if (IS_ERR(bdev)) {
+		return (PTR_ERR(bdev) == -EBUSY) ?
+				-OCF_ERR_NOT_OPEN_EXC :
+				-OCF_ERR_INVAL_VOLUME_TYPE;
+	}
+
+	result = cas_blk_open_volume_by_bdev(&volume, bdev);
+	if (result)
+		goto out_bdev;
+
+	cmd_info->format_atomic = (ocf_ctx_get_volume_type_id(cas_ctx,
+			ocf_volume_get_type(volume)) == ATOMIC_DEVICE_OBJECT);
+
+	init_completion(&context.compl);
+	context.cmd_info = cmd_info;
+	context.result = &result;
+
+	ocf_metadata_probe(cas_ctx, volume, cache_mng_metadata_probe_end,
+			&context);
+
+	wait_for_completion(&context.compl);
+
+	cas_blk_close_volume(volume);
+out_bdev:
+	CLOSE_BDEV_EXCLUSIVE(bdev, FMODE_READ);
+	return result;
+}
+
+int cache_mng_prepare_core_cfg(struct ocf_mngt_core_config *cfg,
+		struct kcas_insert_core *cmd_info)
+{
+	int result;
+
+	if (strnlen(cmd_info->core_path_name, MAX_STR_LEN) >= MAX_STR_LEN)
+		return -OCF_ERR_INVAL;
+
+	memset(cfg, 0, sizeof(*cfg));
+	cfg->uuid.data = cmd_info->core_path_name;
+	cfg->uuid.size = strnlen(cmd_info->core_path_name, MAX_STR_LEN) + 1;
+	cfg->core_id = cmd_info->core_id;
+	cfg->cache_id = cmd_info->cache_id;
+	cfg->try_add = cmd_info->try_add;
+
+	if (cas_upgrade_is_in_upgrade()) {
+		cfg->volume_type = BLOCK_DEVICE_OBJECT;
+		return 0;
+	}
+
+	if (cmd_info->update_path)
+		return 0;
+
+	result = cas_blk_identify_type(cfg->uuid.data, &cfg->volume_type);
+	if (!result && cfg->volume_type == ATOMIC_DEVICE_OBJECT)
+		result = -KCAS_ERR_NVME_BAD_FORMAT;
+	if (OCF_ERR_NOT_OPEN_EXC == abs(result)) {
+		printk(KERN_WARNING OCF_PREFIX_SHORT
+			"Cannot open device %s exclusively. 
" + "It is already opened by another program!\n", + cmd_info->core_path_name); + } + + return result; +} + +int cache_mng_update_core_uuid(ocf_cache_t cache, ocf_core_id_t id, ocf_uuid_t uuid) +{ + ocf_core_t core; + ocf_volume_t vol; + struct block_device *bdev; + struct bd_object *bdvol; + bool match; + int result; + + if (ocf_core_get(cache, id, &core)) { + /* no such core */ + return -ENODEV; + } + + if (ocf_core_get_state(core) != ocf_core_state_active) { + /* core inactive */ + return -ENODEV; + } + + /* get bottom device volume for this core */ + vol = ocf_core_get_volume(core); + bdvol = bd_object(vol); + + /* lookup block device object for device pointed by uuid */ + bdev = LOOKUP_BDEV(uuid->data); + if (IS_ERR(bdev)) { + printk(KERN_ERR "failed to lookup bdev%s\n", (char*)uuid->data); + return -ENODEV; + } + + /* check whether both core id and uuid point to the same block device */ + match = (bdvol->btm_bd == bdev); + + bdput(bdev); + + if (match) { + result = ocf_core_set_uuid(core, uuid); + } else { + printk(KERN_ERR "UUID provided does not match target core device\n"); + result = -ENODEV; + } + + return result; +} + +static void _cache_mng_log_core_device_path(ocf_core_t core) +{ + ocf_cache_t cache = ocf_core_get_cache(core); + const ocf_uuid_t core_uuid = (const ocf_uuid_t)ocf_core_get_uuid(core); + + printk(KERN_INFO OCF_PREFIX_SHORT "Adding device %s as core %s " + "to cache %s\n", (const char*)core_uuid->data, + ocf_core_get_name(core), ocf_cache_get_name(cache)); +} + +static int _cache_mng_log_core_device_path_visitor(ocf_core_t core, void *cntx) +{ + _cache_mng_log_core_device_path(core); + + return 0; +} + +/************************************************************ + * Function for adding a CORE object to the cache instance. 
* + ************************************************************/ +int cache_mng_add_core_to_cache(struct ocf_mngt_core_config *cfg, + struct kcas_insert_core *cmd_info) +{ + int result; + ocf_cache_t cache; + ocf_core_t core; + ocf_core_id_t core_id; + + result = ocf_mngt_cache_get(cas_ctx, cfg->cache_id, &cache); + if (cfg->try_add && (result == -OCF_ERR_CACHE_NOT_EXIST)) { + result = ocf_mngt_core_pool_add(cas_ctx, &cfg->uuid, + cfg->volume_type); + if (result) { + cmd_info->ext_err_code = + -OCF_ERR_CANNOT_ADD_CORE_TO_POOL; + printk(KERN_ERR OCF_PREFIX_SHORT + "Error occurred during" + " adding core to detached core pool\n"); + } else { + printk(KERN_INFO OCF_PREFIX_SHORT + "Successfully added" + " core to core pool\n"); + } + return result; + } else if (result) { + return result; + } + + result = ocf_mngt_cache_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + if (cmd_info && cmd_info->update_path) { + result = cache_mng_update_core_uuid(cache, cfg->core_id, &cfg->uuid); + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + return result; + } + + cfg->seq_cutoff_threshold = seq_cut_off_mb * MiB; + + result = ocf_mngt_cache_add_core(cache, &core, cfg); + if (result) + goto error_affter_lock; + + core_id = ocf_core_get_id(core); + + result = block_dev_create_exported_object(core); + if (result) + goto error_after_add_core; + + result = block_dev_activate_exported_object(core); + if (result) + goto error_after_create_exported_object; + + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + + if (cmd_info) + cmd_info->core_id = core_id; + + _cache_mng_log_core_device_path(core); + + return 0; + +error_after_create_exported_object: + block_dev_destroy_exported_object(core); + +error_after_add_core: + ocf_mngt_cache_remove_core(core); + +error_affter_lock: + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + + return result; +} + +/* Flush cache and destroy exported object */ +int 
_cache_mng_remove_core_prepare(ocf_cache_t cache, ocf_core_t core, + struct kcas_remove_core *cmd, bool destroy) +{ + int result = 0; + int flush_result = 0; + bool core_active; + bool flush_interruptible = !destroy; + + core_active = (ocf_core_get_state(core) == ocf_core_state_active); + + if (cmd->detach && !core_active) { + printk(KERN_WARNING OCF_PREFIX_SHORT + "Cannot detach core which " + "is already inactive!\n"); + return -OCF_ERR_CORE_IN_INACTIVE_STATE; + } + + if (core_active && destroy) { + result = block_dev_destroy_exported_object(core); + if (result) + return result; + } + + if (!cmd->force_no_flush) { + if (core_active) { + /* Flush core */ + flush_result = ocf_mngt_core_flush(core, + flush_interruptible); + } else { + printk(KERN_WARNING OCF_PREFIX_SHORT + "Cannot remove inactive core " + "without force option\n"); + return -OCF_ERR_CORE_IN_INACTIVE_STATE; + } + } + + if (flush_result) + result = destroy ? -KCAS_ERR_REMOVED_DIRTY : flush_result; + + return result; +} + +/**************************************************************** + * Function for removing a CORE object from the cache instance + */ +int cache_mng_remove_core_from_cache(struct kcas_remove_core *cmd) +{ + int result, flush_result = 0; + ocf_cache_t cache; + ocf_core_t core; + + result = ocf_mngt_cache_get(cas_ctx, cmd->cache_id, &cache); + if (result) + return result; + + if (!cmd->force_no_flush) { + /* First check state and flush data (if requested by user) + under read lock */ + result = ocf_mngt_cache_read_lock(cache); + if (result) + goto put; + + result = ocf_core_get(cache, cmd->core_id, &core); + if (result < 0) + goto rd_unlock; + + result = _cache_mng_remove_core_prepare(cache, core, cmd, + false); + if (result) + goto rd_unlock; + + ocf_mngt_cache_read_unlock(cache); + } + + /* Acquire write lock */ + result = ocf_mngt_cache_lock(cache); + if (result) + goto put; + + result = ocf_core_get(cache, cmd->core_id, &core); + if (result < 0) { + goto unlock; + } + + /* + * 
Destroy exported object and flush core again but don't allow for
+	 * interruption - in case of flush error after exported object had been
+	 * destroyed, instead of trying rolling this back we rather detach core
+	 * and then inform user about error.
+	 */
+	result = _cache_mng_remove_core_prepare(cache, core, cmd, true);
+	if (result == -KCAS_ERR_REMOVED_DIRTY) {
+		flush_result = result;
+		result = 0;
+	} else if (result) {
+		goto unlock;
+	}
+
+	if (cmd->detach || flush_result)
+		result = ocf_mngt_cache_detach_core(core);
+	else
+		result = ocf_mngt_cache_remove_core(core);
+
+	if (!result && flush_result)
+		result = flush_result;
+
+unlock:
+	ocf_mngt_cache_unlock(cache);
+put:
+	ocf_mngt_cache_put(cache);
+	return result;
+
+rd_unlock:
+	ocf_mngt_cache_read_unlock(cache);
+	ocf_mngt_cache_put(cache);
+	return result;
+}
+
+int cache_mng_reset_core_stats(ocf_cache_id_t cache_id,
+		ocf_core_id_t core_id)
+{
+	ocf_cache_t cache;
+	ocf_core_t core;
+	int result;
+
+	result = ocf_mngt_cache_get(cas_ctx, cache_id, &cache);
+	if (result)
+		return result;
+
+	result = ocf_mngt_cache_lock(cache);
+	if (result) {
+		ocf_mngt_cache_put(cache);
+		return result;
+	}
+
+	result = ocf_core_get(cache, core_id, &core);
+	if (result)
+		goto out;
+
+	ocf_core_stats_initialize(core);
+
+out:
+	ocf_mngt_cache_unlock(cache);
+	ocf_mngt_cache_put(cache);
+	/* Propagate core lookup failure instead of unconditionally
+	 * reporting success (was: return 0). */
+	return result;
+}
+
+static inline void io_class_info2cfg(ocf_part_id_t part_id,
+	struct ocf_io_class_info *info, struct ocf_mngt_io_class_config *cfg)
+{
+	cfg->class_id = part_id;
+	cfg->name = info->name;
+	cfg->prio = info->priority;
+	cfg->cache_mode = info->cache_mode;
+	cfg->min_size = info->min_size;
+	cfg->max_size = info->max_size;
+}
+
+int cache_mng_set_partitions(struct kcas_io_classes *cfg)
+{
+	ocf_cache_t cache;
+	struct ocf_mngt_io_classes_config *io_class_cfg;
+	ocf_part_id_t class_id;
+	int result;
+
+	io_class_cfg = kzalloc(sizeof(struct ocf_mngt_io_class_config) *
+			OCF_IO_CLASS_MAX, GFP_KERNEL);
+	if (!io_class_cfg)
+		
return -OCF_ERR_NO_MEM; + + for (class_id = 0; class_id < OCF_IO_CLASS_MAX; class_id++) { + io_class_cfg->config[class_id].class_id = class_id; + + if (!cfg->info[class_id].name[0]) { + io_class_cfg->config[class_id].class_id = class_id; + continue; + } + + io_class_info2cfg(class_id, &cfg->info[class_id], + &io_class_cfg->config[class_id]); + } + + result = ocf_mngt_cache_get(cas_ctx, cfg->cache_id, &cache); + if (result) + goto err; + + result = ocf_mngt_cache_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + goto err; + } + + result = ocf_mngt_cache_io_classes_configure(cache, io_class_cfg); + + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + +err: + kfree(io_class_cfg); + + return result; +} + +static int _cache_mng_create_exported_object(ocf_core_t core, void *cntx) +{ + int result; + ocf_cache_t cache = ocf_core_get_cache(core); + + result = block_dev_create_exported_object(core); + if (result) { + printk(KERN_ERR "Cannot to create exported object, " + "cache id = %u, core id = %u\n", + ocf_cache_get_id(cache), + ocf_core_get_id(core)); + return result; + } + + result = block_dev_activate_exported_object(core); + if (result) { + printk(KERN_ERR "Cannot to activate exported object, " + "cache id = %u, core id = %u\n", + ocf_cache_get_id(cache), + ocf_core_get_id(core)); + } + + return result; +} + +static int _cache_mng_destroy_exported_object(ocf_core_t core, void *cntx) +{ + if (block_dev_destroy_exported_object(core)) { + ocf_cache_t cache = ocf_core_get_cache(core); + + printk(KERN_ERR "Cannot to destroy exported object, " + "cache id = %u, core id = %u\n", + ocf_cache_get_id(cache), + ocf_core_get_id(core)); + } + + return 0; +} + +static int cache_mng_initialize_core_objects(ocf_cache_t cache) +{ + int result; + + result = ocf_core_visit(cache, _cache_mng_create_exported_object, NULL, + true); + if (result) { + /* Need to cleanup */ + ocf_core_visit(cache, _cache_mng_destroy_exported_object, NULL, + true); + } + + return result; +} 
+ +int cache_mng_prepare_cache_cfg(struct ocf_mngt_cache_config *cfg, + struct ocf_mngt_cache_device_config *device_cfg, + struct kcas_start_cache *cmd) +{ + int init_cache, result; + struct atomic_dev_params atomic_params = { 0 }; + struct block_device *bdev; + int part_count; + char holder[] = "CAS START\n"; + bool is_part; + + if (strnlen(cmd->cache_path_name, MAX_STR_LEN) >= MAX_STR_LEN) + return -OCF_ERR_INVAL; + + memset(cfg, 0, sizeof(*cfg)); + + cfg->id = cmd->cache_id; + cfg->cache_mode = cmd->caching_mode; + cfg->cache_line_size = cmd->line_size; + cfg->eviction_policy = cmd->eviction_policy; + cfg->cache_line_size = cmd->line_size; + cfg->pt_unaligned_io = !unaligned_io; + cfg->use_submit_io_fast = !use_io_scheduler; + cfg->locked = true; + cfg->metadata_volatile = false; + cfg->metadata_layout = metadata_layout; + + cfg->backfill.max_queue_size = max_writeback_queue_size; + cfg->backfill.queue_unblock_size = writeback_queue_unblock_size; + + device_cfg->uuid.data = cmd->cache_path_name; + device_cfg->uuid.size = strnlen(device_cfg->uuid.data, MAX_STR_LEN) + 1; + device_cfg->cache_line_size = cmd->line_size; + device_cfg->force = cmd->force; + device_cfg->perform_test = true; + device_cfg->discard_on_start = true; + + init_cache = cmd->init_cache; + + switch (init_cache) { + case CACHE_INIT_NEW: + case CACHE_INIT_LOAD: + break; + default: + return -OCF_ERR_INVAL; + } + + bdev = OPEN_BDEV_EXCLUSIVE(device_cfg->uuid.data, FMODE_READ, holder); + if (IS_ERR(bdev)) { + return (PTR_ERR(bdev) == -EBUSY) ? 
+ -OCF_ERR_NOT_OPEN_EXC : + -OCF_ERR_INVAL_VOLUME_TYPE; + } + + is_part = (bdev->bd_contains != bdev); + part_count = cas_blk_get_part_count(bdev); + CLOSE_BDEV_EXCLUSIVE(bdev, FMODE_READ); + + if (!is_part && part_count > 1 && !device_cfg->force) + return -KCAS_ERR_CONTAINS_PART; + + result = cas_blk_identify_type_atomic(device_cfg->uuid.data, + &device_cfg->volume_type, &atomic_params); + if (result) + return result; + + cmd->metadata_mode_optimal = + block_dev_is_metadata_mode_optimal(&atomic_params, + device_cfg->volume_type); + + return 0; +} + +static void _cache_mng_log_cache_device_path(ocf_cache_t cache, + struct ocf_mngt_cache_device_config *device_cfg) +{ + printk(KERN_INFO OCF_PREFIX_SHORT "Adding device %s as cache %s\n", + (const char*)device_cfg->uuid.data, + ocf_cache_get_name(cache)); +} + +static void _cas_queue_kick(ocf_queue_t q) +{ + return cas_kick_queue_thread(q); +} + +static void _cas_queue_stop(ocf_queue_t q) +{ + return cas_stop_queue_thread(q); +} + + +const struct ocf_queue_ops queue_ops = { + .kick = _cas_queue_kick, + .stop = _cas_queue_stop, +}; + +static int _cache_mng_start_queues(ocf_cache_t cache) +{ + uint32_t queues_no = num_online_cpus(); + ocf_queue_t *queue_map; + int result, i; + + queue_map = kcalloc(queues_no, sizeof(*queue_map), GFP_KERNEL); + if (!queue_map) + return -ENOMEM; + + for (i = 0; i < queues_no; i++) { + result = ocf_queue_create(cache, &queue_map[i], &queue_ops); + if (result) + goto err; + + result = cas_create_queue_thread(queue_map[i], i); + if (result) { + ocf_queue_put(queue_map[i]); + goto err; + } + } + + ocf_cache_set_flush_queue(cache, queue_map[0]); + + ocf_cache_set_priv(cache, queue_map); + + return 0; +err: + while (--i >= 0) + ocf_queue_put(queue_map[i]); + + kfree(queue_map); + + return result; +} +struct _cache_mng_attach_context { + struct completion compl; + int *result; +}; + +static void _cache_mng_attach_complete(void *priv, int error) +{ + struct _cache_mng_attach_context *context = 
priv; + + *context->result = error; + complete(&context->compl); +} + +static int _cache_mng_start(struct ocf_mngt_cache_config *cfg, + struct ocf_mngt_cache_device_config *device_cfg, ocf_cache_t *cache) +{ + struct _cache_mng_attach_context context; + ocf_queue_t *queue_map; + ocf_cache_t tmp_cache; + int result; + + result = ocf_mngt_cache_start(cas_ctx, &tmp_cache, cfg); + if (result) + return result; + + result = _cache_mng_start_queues(tmp_cache); + if (result) + goto queues_err; + + init_completion(&context.compl); + context.result = &result; + + ocf_mngt_cache_attach(tmp_cache, device_cfg, + _cache_mng_attach_complete, &context); + + wait_for_completion(&context.compl); + if (result) + goto attach_err; + + _cache_mng_log_cache_device_path(tmp_cache, device_cfg); + + *cache = tmp_cache; + + return 0; + +attach_err: + queue_map = (ocf_queue_t *)ocf_cache_get_priv(tmp_cache); + kfree(queue_map); +queues_err: + ocf_mngt_cache_stop(tmp_cache); + ocf_mngt_cache_unlock(tmp_cache); + return result; +} + +struct _cache_mng_load_context { + struct completion compl; + int *result; +}; + +static void _cache_mng_load_complete(void *priv, int error) +{ + struct _cache_mng_load_context *context = priv; + + *context->result = error; + complete(&context->compl); +} + +static int _cache_mng_load(struct ocf_mngt_cache_config *cfg, + struct ocf_mngt_cache_device_config *device_cfg, ocf_cache_t *cache) +{ + struct _cache_mng_load_context context; + ocf_queue_t *queue_map; + ocf_cache_t tmp_cache; + int result; + + result = ocf_mngt_cache_start(cas_ctx, &tmp_cache, cfg); + if (result) + return result; + + result = _cache_mng_start_queues(tmp_cache); + if (result) + goto queues_err; + + init_completion(&context.compl); + context.result = &result; + + ocf_mngt_cache_load(tmp_cache, device_cfg, + _cache_mng_load_complete, &context); + + wait_for_completion(&context.compl); + if (result) + goto load_err; + + _cache_mng_log_cache_device_path(tmp_cache, device_cfg); + + result = 
cache_mng_initialize_core_objects(tmp_cache);
+	if (result)
+		goto load_err;
+
+	ocf_core_visit(tmp_cache, _cache_mng_log_core_device_path_visitor,
+			NULL, false);
+
+	*cache = tmp_cache;
+
+	return 0;
+
+load_err:
+	queue_map = (ocf_queue_t *)ocf_cache_get_priv(tmp_cache);
+	kfree(queue_map);
+queues_err:
+	/* Stop the locally started cache; the caller's *cache output
+	 * parameter is never assigned on the error path (was:
+	 * ocf_mngt_cache_stop(*cache), dereferencing an unset pointer). */
+	ocf_mngt_cache_stop(tmp_cache);
+	return result;
+}
+
+int cache_mng_init_instance(struct ocf_mngt_cache_config *cfg,
+		struct ocf_mngt_cache_device_config *device_cfg,
+		struct kcas_start_cache *cmd)
+{
+	ocf_cache_t cache;
+	const char *name;
+	bool load = (cmd && cmd->init_cache == CACHE_INIT_LOAD);
+	int result;
+
+	if (!try_module_get(THIS_MODULE))
+		return -KCAS_ERR_SYSTEM;
+
+	if (cmd)
+		cmd->min_free_ram = device_cfg->min_free_ram;
+
+	/* Start cache. Returned cache instance will be locked as it was set
+	 * in configuration.
+	 */
+	if (!load)
+		result = _cache_mng_start(cfg, device_cfg, &cache);
+	else
+		result = _cache_mng_load(cfg, device_cfg, &cache);
+
+	if (result) {
+		module_put(THIS_MODULE);
+		return result;
+	}
+
+	if (cmd) {
+		ocf_volume_t cache_obj = ocf_cache_get_volume(cache);
+		struct bd_object *bd_cache_obj = bd_object(cache_obj);
+		struct block_device *bdev = bd_cache_obj->btm_bd;
+
+		/* If we deal with whole device, reread partitions */
+		if (bdev->bd_contains == bdev)
+			ioctl_by_bdev(bdev, BLKRRPART, (unsigned long)NULL);
+
+		/* Set other back information */
+		name = block_dev_get_elevator_name(
+				casdsk_disk_get_queue(bd_cache_obj->dsk));
+		if (name)
+			strlcpy(cmd->cache_elevator,
+				name, MAX_ELEVATOR_NAME);
+	}
+
+	ocf_mngt_cache_unlock(cache);
+
+	return 0;
+}
+
+/**
+ * @brief routine implementing dynamic sequential cutoff parameter switching
+ * @param[in] cache_id cache id to which the change pertains
+ * @param[in] core_id core id to which the change pertains
+ * or OCF_CORE_ID_INVALID for setting value for all cores
+ * attached to specified cache
+ * @param[in] thresh new sequential cutoff threshold value
+ * @return exit code of 
successful completion is 0; + * nonzero exit code means failure + */ + +int cache_mng_set_seq_cutoff_threshold(ocf_cache_id_t cache_id, ocf_core_id_t core_id, + uint32_t thresh) +{ + ocf_cache_t cache; + ocf_core_t core; + int result; + + result = ocf_mngt_cache_get(cas_ctx, cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + if (core_id != OCF_CORE_ID_INVALID) { + result = ocf_core_get(cache, core_id, &core); + if (result) + goto out; + result = ocf_mngt_core_set_seq_cutoff_threshold(core, thresh); + } else { + result = ocf_mngt_core_set_seq_cutoff_threshold_all(cache, + thresh); + } + +out: + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +/** + * @brief routine implementing dynamic sequential cutoff parameter switching + * @param[in] id cache id to which the change pertains + * @param[in] core_id core id to which the change pertains + * or OCF_CORE_ID_INVALID for setting value for all cores + * attached to specified cache + * @param[in] policy new sequential cutoff policy value + * @return exit code of successful completion is 0; + * nonzero exit code means failure + */ + +int cache_mng_set_seq_cutoff_policy(ocf_cache_id_t id, ocf_core_id_t core_id, + ocf_seq_cutoff_policy policy) +{ + ocf_cache_t cache; + ocf_core_t core; + int result; + + result = ocf_mngt_cache_get(cas_ctx, id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + if (core_id != OCF_CORE_ID_INVALID) { + result = ocf_core_get(cache, core_id, &core); + if (result) + goto out; + result = ocf_mngt_core_set_seq_cutoff_policy(core, policy); + } else { + result = ocf_mngt_core_set_seq_cutoff_policy_all(cache, policy); + } + +out: + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +/** + * @brief routine implementing dynamic 
sequential cutoff parameter switching + * @param[in] cache_id cache id to which the change pertains + * @param[in] core_id core id to which the change pertains + * or OCF_CORE_ID_INVALID for setting value for all cores + * attached to specified cache + * @param[out] thresh new sequential cutoff threshold value + * @return exit code of successful completion is 0; + * nonzero exit code means failure + */ + +int cache_mng_get_seq_cutoff_threshold(ocf_cache_id_t cache_id, + ocf_core_id_t core_id, uint32_t *thresh) +{ + ocf_cache_t cache; + ocf_core_t core; + int result; + + result = ocf_mngt_cache_get(cas_ctx, cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_read_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + result = ocf_core_get(cache, core_id, &core); + if (result) + goto out; + + result = ocf_mngt_core_get_seq_cutoff_threshold(core, thresh); + +out: + ocf_mngt_cache_read_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +/** + * @brief routine implementing dynamic sequential cutoff parameter switching + * @param[in] id cache id to which the change pertains + * @param[in] core_id core id to which the change pertains + * or OCF_CORE_ID_INVALID for setting value for all cores + * attached to specified cache + * @param[out] policy new sequential cutoff policy value + * @return exit code of successful completion is 0; + * nonzero exit code means failure + */ + +int cache_mng_get_seq_cutoff_policy(ocf_cache_id_t id, ocf_core_id_t core_id, + ocf_seq_cutoff_policy *policy) +{ + ocf_cache_t cache; + ocf_core_t core; + int result; + + result = ocf_mngt_cache_get(cas_ctx, id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_read_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + result = ocf_core_get(cache, core_id, &core); + if (result) + goto out; + + result = ocf_mngt_core_get_seq_cutoff_policy(core, policy); + +out: + 
ocf_mngt_cache_read_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +/** + * @brief routine implementing dynamic cache mode switching + * @param device caching device to which operation applies + * @param mode target mode (WRITE_THROUGH, WRITE_BACK, WRITE_AROUND etc.) + * @param flush shall we flush dirty data during switch, or shall we flush + * all remaining dirty data before entering new mode? + */ + +int cache_mng_set_cache_mode(ocf_cache_id_t id, ocf_cache_mode_t mode, + uint8_t flush) +{ + int result; + ocf_cache_t cache; + + result = ocf_mngt_cache_get(cas_ctx, id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + if (flush) { + result = ocf_mngt_cache_flush(cache, true); + if (result) + goto out; + } + + result = ocf_mngt_cache_set_mode(cache, mode, flush); + +out: + ocf_mngt_cache_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +/** + * @brief routine implements --remove-cache command. + * @param[in] device caching device to be removed + * @param[in] flush Boolean: shall we flush dirty data before removing cache. + * if yes, flushing may still be interrupted by user (in which case + * device won't be actually removed and error will be returned) + * @param[in] allow_interruption shall we allow interruption of dirty + * data flushing + */ +int cache_mng_exit_instance(ocf_cache_id_t id, int flush) +{ + ocf_cache_t cache; + ocf_queue_t *queue_map; + int status, flush_status = 0; + + /* Get cache */ + status = ocf_mngt_cache_get(cas_ctx, id, &cache); + if (status) + return status; + + queue_map = (ocf_queue_t *)ocf_cache_get_priv(cache); + + status = ocf_mngt_cache_read_lock(cache); + if (status) + goto put; + /* + * Flush cache. Flushing may take a long time, so we allow user + * to interrupt this operation. Hence we do first flush before + * disabling exported object to avoid restoring it in case + * of interruption. 
That means some new dirty data could appear + * in cache during flush operation which will not be flushed + * this time, so we need to flush cache again after disabling + * exported object. The second flush should be much faster. + */ + if (flush) { + status = ocf_mngt_cache_flush(cache, true); + switch (status) { + case -OCF_ERR_CACHE_IN_INCOMPLETE_STATE: + case -OCF_ERR_FLUSHING_INTERRUPTED: + ocf_mngt_cache_read_unlock(cache); + goto put; + default: + flush_status = status; + break; + } + } + + ocf_mngt_cache_read_unlock(cache); + + /* get cache write lock */ + status = ocf_mngt_cache_lock(cache); + if (status) + goto put; + + if (!cas_upgrade_is_in_upgrade()) { + /* If we are not in upgrade - destroy cache devices */ + status = block_dev_destroy_all_exported_objects(cache); + if (status != 0) { + printk(KERN_WARNING + "Failed to remove all cached devices\n"); + goto unlock; + } + } else { + if (flush_status) { + status = flush_status; + goto unlock; + } + /* + * We are being switched to upgrade in flight mode - + * wait for finishing pending core requests + */ + cache_mng_wait_for_rq_finish(cache); + } + + /* Flush cache again. This time we don't allow interruption. 
*/ + if (flush) + flush_status = ocf_mngt_cache_flush(cache, false); + + /* Stop cache device */ + status = ocf_mngt_cache_stop(cache); + + if (!status && flush_status) + status = -KCAS_ERR_STOPPED_DIRTY; + + module_put(THIS_MODULE); + + kfree(queue_map); + +unlock: + ocf_mngt_cache_unlock(cache); +put: + ocf_mngt_cache_put(cache); + return status; +} + +static int cache_mng_list_caches_visitor(ocf_cache_t cache, void *cntx) +{ + ocf_cache_id_t id = ocf_cache_get_id(cache); + struct kcas_cache_list *list = cntx; + + if (list->id_position >= id) + return 0; + + if (list->in_out_num >= ARRAY_SIZE(list->cache_id_tab)) + return 1; + + list->cache_id_tab[list->in_out_num] = id; + list->in_out_num++; + + return 0; +} + +int cache_mng_list_caches(struct kcas_cache_list *list) +{ + list->in_out_num = 0; + return ocf_mngt_cache_visit(cas_ctx, cache_mng_list_caches_visitor, list); +} + +int cache_mng_interrupt_flushing(ocf_cache_id_t id) +{ + int result; + ocf_cache_t cache; + + result = ocf_mngt_cache_get(cas_ctx, id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_flush_interrupt(cache); + + ocf_mngt_cache_put(cache); + + return result; + +} + +int cache_mng_get_info(struct kcas_cache_info *info) +{ + uint32_t i, j; + int result; + ocf_cache_t cache; + ocf_core_t core; + const struct ocf_volume_uuid *uuid; + + result = ocf_mngt_cache_get(cas_ctx, info->cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_read_lock(cache); + if (result) + goto put; + + result = ocf_cache_get_info(cache, &info->info); + if (result) + goto unlock; + + if (info->info.attached) { + uuid = ocf_cache_get_uuid(cache); + strlcpy(info->cache_path_name, uuid->data, + min(sizeof(info->cache_path_name), uuid->size)); + + switch (info->info.volume_type) { + case BLOCK_DEVICE_OBJECT: + info->metadata_mode = CAS_METADATA_MODE_NORMAL; + break; + case ATOMIC_DEVICE_OBJECT: + info->metadata_mode = CAS_METADATA_MODE_ATOMIC; + break; + default: + info->metadata_mode 
= CAS_METADATA_MODE_INVALID; + break; + } + } + + /* Collect cores IDs */ + for (i = 0, j = 0; j < info->info.core_count && + i < OCF_CORE_MAX; i++) { + if (ocf_core_get(cache, i, &core)) + continue; + + info->core_id[j] = i; + j++; + } + +unlock: + ocf_mngt_cache_read_unlock(cache); +put: + ocf_mngt_cache_put(cache); + return result; +} + +int cache_mng_get_io_class_info(struct kcas_io_class *part) +{ + int result; + ocf_cache_id_t cache_id = part->cache_id; + ocf_core_id_t core_id = part->core_id; + uint32_t io_class_id = part->class_id; + ocf_cache_t cache; + ocf_core_t core; + + result = ocf_mngt_cache_get(cas_ctx, cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_read_lock(cache); + if (result) { + ocf_mngt_cache_put(cache); + return result; + } + + result = ocf_cache_io_class_get_info(cache, io_class_id, &part->info); + if (result) + goto end; + + if (part->get_stats) { + result = ocf_core_get(cache, core_id, &core); + if (result < 0) { + result = OCF_ERR_CORE_NOT_AVAIL; + goto end; + } + + result = ocf_core_io_class_get_stats(core, io_class_id, + &part->stats); + } + +end: + ocf_mngt_cache_read_unlock(cache); + ocf_mngt_cache_put(cache); + return result; +} + +int cache_mng_get_core_info(struct kcas_core_info *info) +{ + ocf_cache_t cache; + ocf_core_t core; + const struct ocf_volume_uuid *uuid; + int result; + + result = ocf_mngt_cache_get(cas_ctx, info->cache_id, &cache); + if (result) + return result; + + result = ocf_mngt_cache_read_lock(cache); + if(result) + goto put; + + result = ocf_core_get(cache, info->core_id, &core); + if (result < 0) { + result = OCF_ERR_CORE_NOT_AVAIL; + goto unlock; + } + + result = ocf_core_get_stats(core, &info->stats); + if (result) + goto unlock; + + uuid = ocf_core_get_uuid(core); + + strlcpy(info->core_path_name, uuid->data, + min(sizeof(info->core_path_name), uuid->size)); + + info->state = ocf_core_get_state(core); + +unlock: + ocf_mngt_cache_read_unlock(cache); +put: + 
ocf_mngt_cache_put(cache); + return result; +} + +static int cache_mng_wait_for_rq_finish_visitor(ocf_core_t core, void *cntx) +{ + ocf_volume_t obj = ocf_core_get_volume(core); + struct bd_object *bdobj = bd_object(obj); + + while (atomic64_read(&bdobj->pending_rqs)) + io_schedule(); + + return 0; +} + +void cache_mng_wait_for_rq_finish(ocf_cache_t cache) +{ + ocf_core_visit(cache, cache_mng_wait_for_rq_finish_visitor, NULL, true); +} + +int cache_mng_set_core_params(struct kcas_set_core_param *info) +{ + switch (info->param_id) { + case core_param_seq_cutoff_threshold: + return cache_mng_set_seq_cutoff_threshold(info->cache_id, + info->core_id, info->param_value); + case core_param_seq_cutoff_policy: + return cache_mng_set_seq_cutoff_policy(info->cache_id, + info->core_id, info->param_value); + default: + return -EINVAL; + } +} + +int cache_mng_get_core_params(struct kcas_get_core_param *info) +{ + switch (info->param_id) { + case core_param_seq_cutoff_threshold: + return cache_mng_get_seq_cutoff_threshold(info->cache_id, + info->core_id, &info->param_value); + case core_param_seq_cutoff_policy: + return cache_mng_get_seq_cutoff_policy(info->cache_id, + info->core_id, &info->param_value); + default: + return -EINVAL; + } +} + +int cache_mng_set_cache_params(struct kcas_set_cache_param *info) +{ + switch (info->param_id) { + case cache_param_cleaning_policy_type: + return cache_mng_set_cleaning_policy(info->cache_id, + info->param_value); + + case cache_param_cleaning_alru_wake_up_time: + return cache_mng_set_cleaning_param(info->cache_id, + ocf_cleaning_alru, ocf_alru_wake_up_time, + info->param_value); + case cache_param_cleaning_alru_stale_buffer_time: + return cache_mng_set_cleaning_param(info->cache_id, + ocf_cleaning_alru, ocf_alru_stale_buffer_time, + info->param_value); + case cache_param_cleaning_alru_flush_max_buffers: + return cache_mng_set_cleaning_param(info->cache_id, + ocf_cleaning_alru, ocf_alru_flush_max_buffers, + info->param_value); + case 
cache_param_cleaning_alru_activity_threshold: + return cache_mng_set_cleaning_param(info->cache_id, + ocf_cleaning_alru, ocf_alru_activity_threshold, + info->param_value); + + case cache_param_cleaning_acp_wake_up_time: + return cache_mng_set_cleaning_param(info->cache_id, + ocf_cleaning_acp, ocf_acp_wake_up_time, + info->param_value); + case cache_param_cleaning_acp_flush_max_buffers: + return cache_mng_set_cleaning_param(info->cache_id, + ocf_cleaning_acp, ocf_acp_flush_max_buffers, + info->param_value); + default: + return -EINVAL; + } +} + +int cache_mng_get_cache_params(struct kcas_get_cache_param *info) +{ + switch (info->param_id) { + case cache_param_cleaning_policy_type: + return cache_mng_get_cleaning_policy(info->cache_id, + &info->param_value); + + case cache_param_cleaning_alru_wake_up_time: + return cache_mng_get_cleaning_param(info->cache_id, + ocf_cleaning_alru, ocf_alru_wake_up_time, + &info->param_value); + case cache_param_cleaning_alru_stale_buffer_time: + return cache_mng_get_cleaning_param(info->cache_id, + ocf_cleaning_alru, ocf_alru_stale_buffer_time, + &info->param_value); + case cache_param_cleaning_alru_flush_max_buffers: + return cache_mng_get_cleaning_param(info->cache_id, + ocf_cleaning_alru, ocf_alru_flush_max_buffers, + &info->param_value); + case cache_param_cleaning_alru_activity_threshold: + return cache_mng_get_cleaning_param(info->cache_id, + ocf_cleaning_alru, ocf_alru_activity_threshold, + &info->param_value); + + case cache_param_cleaning_acp_wake_up_time: + return cache_mng_get_cleaning_param(info->cache_id, + ocf_cleaning_acp, ocf_acp_wake_up_time, + &info->param_value); + case cache_param_cleaning_acp_flush_max_buffers: + return cache_mng_get_cleaning_param(info->cache_id, + ocf_cleaning_acp, ocf_acp_flush_max_buffers, + &info->param_value); + default: + return -EINVAL; + } +} diff --git a/modules/cas_cache/layer_cache_management.h b/modules/cas_cache/layer_cache_management.h new file mode 100644 index 000000000..4d484e64b 
--- /dev/null +++ b/modules/cas_cache/layer_cache_management.h @@ -0,0 +1,92 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ +#ifndef __LAYER_CACHE_MANAGEMENT_H__ +#define __LAYER_CACHE_MANAGEMENT_H__ + +#define CAS_BLK_DEV_REQ_TYPE_BIO 1 +#define CAS_BLK_DEV_REQ_TYPE_REQ 3 + +int cache_mng_set_cleaning_policy(ocf_cache_id_t cache_id, uint32_t type); + +int cache_mng_get_cleaning_policy(ocf_cache_id_t cache_id, uint32_t *type); + +int cache_mng_set_cleaning_param(ocf_cache_id_t cache_id, ocf_cleaning_t type, + uint32_t param_id, uint32_t param_value); + +int cache_mng_get_cleaning_param(ocf_cache_id_t cache_id, ocf_cleaning_t type, + uint32_t param_id, uint32_t *param_value); + +int cache_mng_add_core_to_cache(struct ocf_mngt_core_config *cfg, + struct kcas_insert_core *cmd_info); + +int cache_mng_remove_core_from_cache(struct kcas_remove_core *cmd); + +int cache_mng_reset_core_stats(ocf_cache_id_t cache_id, + ocf_core_id_t core_id); + +int cache_mng_set_partitions(struct kcas_io_classes *cfg); + +int cache_mng_exit_instance(ocf_cache_id_t id, int flush); + +int cache_mng_prepare_cache_cfg(struct ocf_mngt_cache_config *cfg, + struct ocf_mngt_cache_device_config *device_cfg, + struct kcas_start_cache *cmd); + +int cache_mng_core_pool_get_paths(struct kcas_core_pool_path *cmd_info); + +int cache_mng_core_pool_remove(struct kcas_core_pool_remove *cmd_info); + +int cache_mng_cache_check_device(struct kcas_cache_check_device *cmd_info); + +int cache_mng_prepare_core_cfg(struct ocf_mngt_core_config *cfg, + struct kcas_insert_core *cmd_info); + +int cache_mng_init_instance(struct ocf_mngt_cache_config *cfg, + struct ocf_mngt_cache_device_config *device_cfg, + struct kcas_start_cache *cmd); + +int cache_mng_set_seq_cutoff_threshold(ocf_cache_id_t id, ocf_core_id_t core_id, + uint32_t thresh); + +int cache_mng_set_seq_cutoff_policy(ocf_cache_id_t id, ocf_core_id_t core_id, + ocf_seq_cutoff_policy policy); + +int 
cache_mng_get_seq_cutoff_threshold(ocf_cache_id_t id, ocf_core_id_t core_id, + uint32_t *thresh); + +int cache_mng_get_seq_cutoff_policy(ocf_cache_id_t id, ocf_core_id_t core_id, + ocf_seq_cutoff_policy *policy); + +int cache_mng_set_cache_mode(ocf_cache_id_t id, ocf_cache_mode_t mode, + uint8_t flush); + +int cache_mng_flush_object(ocf_cache_id_t cache_id, ocf_core_id_t core_id); + +int cache_mng_flush_device(ocf_cache_id_t id); + +ocf_cache_line_t cache_mng_lookup(ocf_cache_t cache, + ocf_core_id_t core_id, uint64_t core_cacheline); + +int cache_mng_list_caches(struct kcas_cache_list *list); + +int cache_mng_interrupt_flushing(ocf_cache_id_t id); + +int cache_mng_get_info(struct kcas_cache_info *info); + +int cache_mng_get_io_class_info(struct kcas_io_class *part); + +int cache_mng_get_core_info(struct kcas_core_info *info); + +void cache_mng_wait_for_rq_finish(ocf_cache_t cache); + +int cache_mng_set_core_params(struct kcas_set_core_param *info); + +int cache_mng_get_core_params(struct kcas_get_core_param *info); + +int cache_mng_set_cache_params(struct kcas_set_cache_param *info); + +int cache_mng_get_cache_params(struct kcas_get_cache_param *info); + +#endif diff --git a/modules/cas_cache/layer_upgrade.c b/modules/cas_cache/layer_upgrade.c new file mode 100644 index 000000000..0adbfec4f --- /dev/null +++ b/modules/cas_cache/layer_upgrade.c @@ -0,0 +1,1495 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include "cas_cache.h" + +#define CAS_UPGRADE_DEBUG 0 + +#if 1 == CAS_UPGRADE_DEBUG +#define CAS_DEBUG_TRACE() \ + printk(KERN_INFO "[Upgrade] %s\n", __func__) + +#define CAS_DEBUG_MSG(msg) \ + printk(KERN_INFO "[Upgrade] %s - %s\n", __func__, msg) + +#define CAS_DEBUG_PARAM(format, ...) \ + printk(KERN_INFO "[Upgrade] %s - "format"\n", \ + __func__, ##__VA_ARGS__) +#else +#define CAS_DEBUG_TRACE() +#define CAS_DEBUG_MSG(msg) +#define CAS_DEBUG_PARAM(format, ...) 
+#endif + +extern u32 max_writeback_queue_size; +extern u32 writeback_queue_unblock_size; +extern u32 metadata_layout; +extern u32 unaligned_io; +extern u32 seq_cut_off_mb; +extern u32 use_io_scheduler; + +typedef int (*restore_callback_t) (struct cas_properties *cache_props); + +static void _cas_upgrade_clear_state(void) +{ + in_upgrade = false; +} + +static void _cas_upgrade_set_state(void) +{ + in_upgrade = true; +} + +bool cas_upgrade_is_in_upgrade(void) +{ + return in_upgrade; +} + +/* + * Caches parameters to serialize + * +------------+-------------------------------+---------------+ + * |Group | Key | Type | + * |------------|-------------------------------|---------------| + * |cache | cache_id | uint | + * |cache | cache_path | string | + * |cache | cache_type | uint | + * |cache | cache_line_size | uint | + * |cache | cache_evp_policy | uint | + * |cache | cache_mode | uint | + * |cache | cache_seq_cutoff_thresh | uint | + * |cache | cache_seq_cutoff_policy | uint | + * |------------|-------------------------------|---------------| + * |core | core_no | uint | + * |core | core_X_id | uint | + * |core | core_X_path | string | + * |core | core_X_type | uint | + * |------------|-------------------------------|---------------| + * |flush | flush_cleaning_policy | uint | + * |flush | flush_wake_up_time | uint | + * |flush | flush_staleness_time | uint | + * |flush | flush_max_buffers | uint | + * |flush | flush_threshold | uint | + * |flush | flush_acp_wake_up_time | uint | + * |flush | flush_acp_max_buffers | uint | + * |------------|-------------------------------|---------------| + * |io_class | io_class_no | uint | + * |io_class | io_class_X_name | string | + * |io_class | io_class_X_id | uint | + * |io_class | io_class_X_max | uint | + * |io_class | io_class_X_min | uint | + * |io_class | io_class_X_cache_mode | uint | + * |io_class | io_class_X_prio | uint | + * +------------+-------------------------------+---------------+ + * + */ + +#define 
UPGRADE_IFACE_VERSION_STR "upgrade_iface_version" + +#define CACHE_ID_STR "cache_id" +#define CACHE_PATH_STR "cache_path" +#define CACHE_LINE_SIZE_STR "cache_line_size" +#define CACHE_TYPE_STR "cache_type" +#define CACHE_MODE_STR "cache_mode" + +#define CORE_NO_STR "core_no" +#define CORE_ID_STR "core_%lu_id" +#define CORE_PATH_STR "core_%lu_path" +#define CORE_SEQ_CUTOFF_THRESHOLD_STR "core_%lu_seq_cutoff_thresh" +#define CORE_SEQ_CUTOFF_POLICY_STR "core_%lu_seq_cutoff_policy" + +#define CLEANING_POLICY_STR "flush_cleaning_policy" +#define CLEANING_ALRU_WAKEUP_TIME_STR "flush_wakeup_time" +#define CLEANING_ALRU_STALENESS_TIME_STR "flush_staleness_time" +#define CLEANING_ALRU_MAX_BUFFERS_STR "flush_max_buffers" +#define CLEANING_ALRU_TRESHOLD_STR "flush_threshold" +#define CLEANING_ACP_WAKEUP_TIME_STR "flush_acp_wakeup_time" +#define CLEANING_ACP_MAX_BUFFERS_STR "flush_acp_max_buffers" + +#define IO_CLASS_NO_STR "io_class_no" +#define IO_CLASS_NAME_STR "io_class_%lu_name" +#define IO_CLASS_MIN_STR "io_class_%lu_min" +#define IO_CLASS_ID_STR "io_class_%lu_id" +#define IO_CLASS_MAX_STR "io_class_%lu_max" +#define IO_CLASS_PRIO_STR "io_class_%lu_prio" +#define IO_CLASS_CACHE_MODE_STR "io_class_%lu_cache_mode" + +#define CAS_UPGRADE_IFACE_VERSION_19_03_00 190300 +#define CAS_UPGRADE_IFACE_CURRENT_VERSION CAS_UPGRADE_IFACE_VERSION_19_03_00 + +static int _cas_upgrade_dump_cache_conf_main(ocf_cache_t cache, + struct cas_properties *cache_props) +{ + int result = 0; + + CAS_DEBUG_TRACE(); + + result = cas_properties_add_uint(cache_props, UPGRADE_IFACE_VERSION_STR, + (uint64_t) CAS_UPGRADE_IFACE_CURRENT_VERSION, + CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during adding interface version\n"); + return result; + } + + result = cas_properties_add_uint(cache_props, CACHE_ID_STR, + (uint64_t) ocf_cache_get_id(cache), + CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during adding cache_id\n"); + return 
result; + } + + result = cas_properties_add_string(cache_props, CACHE_PATH_STR, + ocf_cache_get_uuid(cache)->data, + CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during adding cache_path\n"); + return result; + } + + result = cas_properties_add_uint(cache_props, CACHE_LINE_SIZE_STR, + (uint64_t) ocf_cache_get_line_size(cache), + CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during adding cache_line_size\n"); + return result; + } + + result = cas_properties_add_uint(cache_props, CACHE_TYPE_STR, + (uint64_t) ocf_cache_get_type_id(cache), + CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT OCF_PREFIX_SHORT + "Error during adding cache_type\n"); + return result; + } + + result = cas_properties_add_uint(cache_props, CACHE_MODE_STR, + (uint64_t) ocf_cache_get_mode(cache), + CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT OCF_PREFIX_SHORT + "Error during adding cache_mode\n"); + return result; + } + + return result; +} + +struct _ocf_core_visitor_ctx { + int i; + struct cas_properties *cache_props; + int error; +}; + +int _cas_upgrade_core_visitor(ocf_core_t core, void *cntx) +{ + int result = 0; + char *value = NULL; + uint32_t core_idx = ocf_core_get_id(core); + struct _ocf_core_visitor_ctx *core_visitor_ctx = + (struct _ocf_core_visitor_ctx*) cntx; + struct cas_properties *cache_props = core_visitor_ctx->cache_props; + unsigned long core_no = 0; + + core_visitor_ctx->i++; + core_no = core_visitor_ctx->i; + + value = kmalloc(sizeof(*value) * MAX_STR_LEN, GFP_KERNEL); + if (value == NULL) { + result = -OCF_ERR_NO_MEM; + return result; + } + + result = snprintf(value, MAX_STR_LEN, CORE_ID_STR, core_no); + if (result < 0) + goto err; + + result = cas_properties_add_uint(cache_props, value, core_idx, + CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT OCF_PREFIX_SHORT + "Error during adding core id\n"); + goto err; + } + + 
result = snprintf(value, MAX_STR_LEN, CORE_PATH_STR, + core_no); + if (result < 0) + goto err; + + result = cas_properties_add_string(cache_props, value, + ocf_core_get_uuid(core)->data, CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT OCF_PREFIX_SHORT + "Error during adding core path\n"); + goto err; + } + + result = snprintf(value, MAX_STR_LEN, CORE_SEQ_CUTOFF_POLICY_STR, core_no); + if (result < 0) + goto err; + + result = cas_properties_add_uint(cache_props, value, + (uint64_t) ocf_core_get_seq_cutoff_policy(core), + CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR "Error during adding core seq cutoff policy\n"); + goto err; + } + + result = snprintf(value, MAX_STR_LEN, CORE_SEQ_CUTOFF_THRESHOLD_STR, core_no); + if (result < 0) + goto err; + + result = cas_properties_add_uint(cache_props, value, + (uint64_t) ocf_core_get_seq_cutoff_threshold(core), + CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR "Error during adding core seq cutoff threshold\n"); + goto err; + } + +err: + kfree(value); + core_visitor_ctx->error = result; + return result; +} + +static int _cas_upgrade_dump_cache_conf_cores(ocf_cache_t device, + struct cas_properties *cache_props) +{ + int result = 0; + struct _ocf_core_visitor_ctx core_visitor_ctx; + char *value = NULL; + + CAS_DEBUG_TRACE(); + + value = kmalloc(sizeof(*value) * MAX_STR_LEN, GFP_KERNEL); + if (value == NULL) { + result = -OCF_ERR_NO_MEM; + return result; + } + + result = cas_properties_add_uint(cache_props, CORE_NO_STR, + (uint64_t) ocf_cache_get_core_count(device), + CAS_PROPERTIES_NON_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT OCF_PREFIX_SHORT + "Error during adding cores number\n"); + goto err; + } + + memset(&core_visitor_ctx, 0, sizeof(core_visitor_ctx)); + core_visitor_ctx.cache_props = cache_props; + + result |= ocf_core_visit(device, _cas_upgrade_core_visitor, + &core_visitor_ctx, true); + if (core_visitor_ctx.error) { + result = core_visitor_ctx.error; + goto 
err; + } + + if (core_visitor_ctx.i > ocf_cache_get_core_count(device)) { + result = -OCF_ERR_INVAL; + goto err; + } + +err: + kfree(value); + return result; +} + +static int _cas_upgrade_dump_cache_conf_flush(ocf_cache_t cache, + struct cas_properties *cache_props) +{ + ocf_cache_id_t cache_id = ocf_cache_get_id(cache); + uint32_t cleaning_type; + uint32_t alru_thread_wakeup_time; + uint32_t alru_stale_buffer_time; + uint32_t alru_flush_max_buffers; + uint32_t alru_activity_threshold; + uint32_t acp_thread_wakeup_time; + uint32_t acp_flush_max_buffers; + int result = 0; + + CAS_DEBUG_TRACE(); + + result |= cache_mng_get_cleaning_policy(cache_id, &cleaning_type); + result |= cache_mng_get_cleaning_param(cache_id, ocf_cleaning_alru, + ocf_alru_wake_up_time, &alru_thread_wakeup_time); + result |= cache_mng_get_cleaning_param(cache_id, ocf_cleaning_alru, + ocf_alru_stale_buffer_time, &alru_stale_buffer_time); + result |= cache_mng_get_cleaning_param(cache_id, ocf_cleaning_alru, + ocf_alru_flush_max_buffers, &alru_flush_max_buffers); + result |= cache_mng_get_cleaning_param(cache_id, ocf_cleaning_alru, + ocf_alru_activity_threshold, &alru_activity_threshold); + result |= cache_mng_get_cleaning_param(cache_id, ocf_cleaning_acp, + ocf_acp_wake_up_time, &acp_thread_wakeup_time); + result |= cache_mng_get_cleaning_param(cache_id, ocf_cleaning_acp, + ocf_acp_flush_max_buffers, &acp_flush_max_buffers); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Unable to get cleaning policy params\n"); + return result; + } + + result = cas_properties_add_uint(cache_props, CLEANING_POLICY_STR, + cleaning_type, CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during adding cleaning policy type\n"); + return result; + } + + result = cas_properties_add_uint(cache_props, + CLEANING_ALRU_WAKEUP_TIME_STR, + alru_thread_wakeup_time, CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during adding alru wakeup time\n"); + 
return result; + } + + result = cas_properties_add_uint(cache_props, + CLEANING_ALRU_STALENESS_TIME_STR, + alru_stale_buffer_time, CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during adding alru staleness time\n"); + return result; + } + + result = cas_properties_add_uint(cache_props, + CLEANING_ALRU_MAX_BUFFERS_STR, + alru_flush_max_buffers, CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during adding alru max flush buffers\n"); + return result; + } + + result = cas_properties_add_uint(cache_props, + CLEANING_ALRU_TRESHOLD_STR, + alru_activity_threshold, CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during adding alru flush threshold\n"); + return result; + } + + result = cas_properties_add_uint(cache_props, + CLEANING_ACP_WAKEUP_TIME_STR, + acp_thread_wakeup_time, CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during adding acp wakeup time\n"); + return result; + } + + result = cas_properties_add_uint(cache_props, + CLEANING_ACP_MAX_BUFFERS_STR, + acp_flush_max_buffers, CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during adding acp max flush buffers\n"); + return result; + } + + return result; +} + +struct _cas_upgrade_dump_io_class_visit_ctx { + struct cas_properties *cache_props; + uint32_t io_class_counter; + int error; +}; + +int _cas_upgrade_dump_io_class_visitor(ocf_cache_t cache, + uint32_t io_class_id, void *ctx) +{ + int result = 0; + struct ocf_io_class_info info; + struct _cas_upgrade_dump_io_class_visit_ctx *io_class_visit_ctx = + (struct _cas_upgrade_dump_io_class_visit_ctx*) ctx; + char *key = NULL; + struct cas_properties *cache_props = io_class_visit_ctx->cache_props; + unsigned long io_class_counter; + + CAS_DEBUG_TRACE(); + + key = kmalloc(sizeof(*key) * 4096, GFP_KERNEL); + if (key == NULL) { + result = -OCF_ERR_NO_MEM; + return result; + } + + result = 
ocf_cache_io_class_get_info(cache, io_class_id, &info); + if (result) + goto error; + + io_class_visit_ctx->io_class_counter++; + io_class_counter = io_class_visit_ctx->io_class_counter; + + result = snprintf(key, MAX_STR_LEN, + IO_CLASS_NAME_STR, io_class_counter); + if (result < 0) + goto error; + + result = cas_properties_add_string(cache_props, key, + info.name, CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during adding io class name\n"); + goto error; + } + + result = snprintf(key, MAX_STR_LEN, + IO_CLASS_MIN_STR, io_class_counter); + if (result < 0) + goto error; + + result = cas_properties_add_uint(cache_props, key, + info.min_size, CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during adding io class min size\n"); + goto error; + } + + result = snprintf(key, MAX_STR_LEN, + IO_CLASS_ID_STR, io_class_counter); + if (result < 0) + goto error; + + result = cas_properties_add_uint(cache_props, key, + io_class_id, CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during adding io class id\n"); + goto error; + } + + result = snprintf(key, MAX_STR_LEN, + IO_CLASS_MAX_STR, io_class_counter); + if (result < 0) + goto error; + + result = cas_properties_add_uint(cache_props, key, + info.max_size, CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during adding io class max size\n"); + goto error; + } + + result = snprintf(key, MAX_STR_LEN, + IO_CLASS_PRIO_STR, io_class_counter); + if (result < 0) + goto error; + + result = cas_properties_add_uint(cache_props, key, + info.priority, CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during adding io class priority\n"); + goto error; + } + + result = snprintf(key, MAX_STR_LEN, + IO_CLASS_CACHE_MODE_STR, io_class_counter); + if (result < 0) + goto error; + + result = cas_properties_add_uint(cache_props, key, + info.cache_mode, CAS_PROPERTIES_CONST); + if 
(result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during adding io class cache mode\n"); + goto error; + } + +error: + kfree(key); + io_class_visit_ctx->error = result; + return result; + +} + +static int _cas_upgrade_dump_cache_conf_io_class(ocf_cache_t cache, + struct cas_properties *cache_props) +{ + int result = 0; + struct _cas_upgrade_dump_io_class_visit_ctx io_class_visit_ctx; + + CAS_DEBUG_TRACE(); + + result = cas_properties_add_uint(cache_props, IO_CLASS_NO_STR, 0, + CAS_PROPERTIES_NON_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during adding io class number\n"); + goto error_after_alloc_buffer; + } + + memset(&io_class_visit_ctx, 0, sizeof(io_class_visit_ctx)); + io_class_visit_ctx.cache_props = cache_props; + + ocf_io_class_visit(cache, _cas_upgrade_dump_io_class_visitor, + &io_class_visit_ctx); + if (io_class_visit_ctx.error) { + result = io_class_visit_ctx.error; + goto error_after_alloc_buffer; + } + + result = cas_properties_add_uint(cache_props, IO_CLASS_NO_STR, + io_class_visit_ctx.io_class_counter, + CAS_PROPERTIES_CONST); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during adding io class number\n"); + goto error_after_alloc_buffer; + } + +error_after_alloc_buffer: + + return result; +} + +static int _cas_upgrade_dump_cache_conf(ocf_cache_t device, + struct cas_properties *cache_props) +{ + int result = 0; + + CAS_DEBUG_TRACE(); + + result = _cas_upgrade_dump_cache_conf_main(device, cache_props); + if (result) + return result; + + result = _cas_upgrade_dump_cache_conf_cores(device, cache_props); + if (result) + return result; + + result = _cas_upgrade_dump_cache_conf_flush(device, cache_props); + if (result) + return result; + + result = _cas_upgrade_dump_cache_conf_io_class(device, cache_props); + if (result) + return result; + + return result; +} + +static void _cas_upgrade_destroy_props_array( + struct cas_properties **caches_props_array, int count) +{ + int i; + + CAS_DEBUG_TRACE(); + + for (i = 0; 
i < count ; i++) { + if (caches_props_array[i] && !IS_ERR(caches_props_array[i])) + cas_properties_destroy(caches_props_array[i]); + caches_props_array[i] = NULL; + } + +} + +static int _cas_upgrade_init_props_array( + struct cas_properties **caches_props_array, int count) +{ + int i, result = 0; + + CAS_DEBUG_TRACE(); + + for (i = 0; i < count ; i++) { + caches_props_array[i] = cas_properties_create(); + if (IS_ERR(caches_props_array[i])) { + result = PTR_ERR(caches_props_array[i]); + break; + } + } + + if (result) + _cas_upgrade_destroy_props_array(caches_props_array, i); + + return result; +} + +struct _cas_cache_dump_conf_visitor_ctx { + int i; + struct cas_properties **caches_props_array; + struct casdsk_props_conf *caches_serialized_conf; + int error; + +}; + +int _cas_upgrade_dump_cache_conf_visitor(ocf_cache_t cache, void *cntx) +{ + int result = 0; + + struct _cas_cache_dump_conf_visitor_ctx *cache_visit_ctx = + (struct _cas_cache_dump_conf_visitor_ctx*) cntx; + struct cas_properties **caches_props_array = + cache_visit_ctx->caches_props_array; + struct casdsk_props_conf *caches_serialized_conf = + cache_visit_ctx->caches_serialized_conf; + + result = _cas_upgrade_dump_cache_conf(cache, + caches_props_array[cache_visit_ctx->i]); + if (result) + goto error; + + result = cas_properties_serialize( + caches_props_array[cache_visit_ctx->i], + &caches_serialized_conf[cache_visit_ctx->i]); + +error: + cache_visit_ctx->i++; + cache_visit_ctx->error = result; + return result; +} + +static int _cas_upgrade_dump_conf(void) +{ + int result = 0, i = 0; + size_t caches_no = 0; + struct casdsk_props_conf *caches_serialized_conf = NULL; + struct _cas_cache_dump_conf_visitor_ctx cache_visit_ctx; + struct cas_properties **caches_props_array; + + CAS_DEBUG_TRACE(); + + caches_no = ocf_mngt_cache_get_count(cas_ctx); + if (caches_no == 0) + return 0; + + caches_props_array = kcalloc(caches_no, + sizeof(*caches_props_array), GFP_KERNEL); + if (caches_props_array == NULL) { + 
result = -OCF_ERR_NO_MEM; + return result; + } + + caches_serialized_conf = kcalloc(caches_no, + sizeof(*caches_serialized_conf), GFP_KERNEL); + if (caches_serialized_conf == NULL) { + kfree(caches_props_array); + result = -OCF_ERR_NO_MEM; + return result; + } + + result = _cas_upgrade_init_props_array(caches_props_array, caches_no); + if (result) { + kfree(caches_props_array); + kfree(caches_serialized_conf); + return result; + } + + /* Set up visitor context */ + memset(&cache_visit_ctx, 0, sizeof(cache_visit_ctx)); + cache_visit_ctx.caches_props_array = caches_props_array; + cache_visit_ctx.caches_serialized_conf = caches_serialized_conf; + + result = ocf_mngt_cache_visit(cas_ctx, _cas_upgrade_dump_cache_conf_visitor, + &cache_visit_ctx); + if (result || cache_visit_ctx.error) { + result |= cache_visit_ctx.error; + goto err_after_init_props_array; + } + + CAS_DEBUG_MSG("End of dump"); + + casdisk_functions.casdsk_store_config(caches_no, caches_serialized_conf); + + CAS_DEBUG_MSG("Configuration stored to idisk"); + +err_after_init_props_array: + if (result) { + CAS_DEBUG_MSG("End of dump: ERROR"); + for (; i >= 0; i--) + kfree(caches_serialized_conf[i].buffer); + + kfree(caches_serialized_conf); + caches_no = 0; + } + _cas_upgrade_destroy_props_array(caches_props_array, caches_no); + kfree(caches_props_array); + return result; +} + +int cas_upgrade_set_pt_and_flush_visitor_core(ocf_core_t core, void *cntx) +{ + int *result = (int*) cntx; + ocf_volume_t vol; + + vol = ocf_core_get_volume(core); + *result = casdisk_functions.casdsk_disk_set_pt(bd_object(vol)->dsk); + + return *result; +} + +int _cas_upgrade_set_pt_and_flush_visitor_cache(ocf_cache_t cache, void *cntx) +{ + int *result = (int*) cntx; + int cache_id = ocf_cache_get_id(cache); + + *result = cache_mng_set_cache_mode(cache_id, ocf_cache_mode_pt, false); + if (*result) + return *result; + + *result = cache_mng_flush_device(cache_id); + if (*result) + return *result; + + ocf_core_visit(cache, 
cas_upgrade_set_pt_and_flush_visitor_core, + result, true); + + return *result; +} + +static int _cas_upgrade_set_pt_and_flush(void) +{ + int result = 0, r = 0; + + CAS_DEBUG_TRACE(); + + r = ocf_mngt_cache_visit_reverse(cas_ctx, + _cas_upgrade_set_pt_and_flush_visitor_cache, &result); + result |= r; + + return result; +} + +int _cas_upgrade_stop_devices_visitor_wait(ocf_cache_t cache, void *cntx) +{ + cache_mng_wait_for_rq_finish(cache); + + return 0; +} + +int _cas_upgrade_stop_devices_visitor_exit(ocf_cache_t cache, void *cntx) +{ + int *result = (int*) cntx; + + *result = cache_mng_exit_instance(ocf_cache_get_id(cache), true); + + return *result; +} + +static int _cas_upgrade_stop_devices(void) +{ + int result = 0, r = 0; + + CAS_DEBUG_TRACE(); + + r = ocf_mngt_cache_visit(cas_ctx, _cas_upgrade_stop_devices_visitor_wait, + NULL); + if (r) + return r; + + r = ocf_mngt_cache_visit_reverse(cas_ctx, + _cas_upgrade_stop_devices_visitor_exit, &result); + result |= r; + + return result; +} + +static int _cas_upgrade_restore_conf_main(struct cas_properties *cache_props, + uint64_t *cache_id) +{ + int result = 0; + uint64_t cache_mode, cache_line_size; + uint64_t cache_type, version; + char *cache_path = NULL; + struct ocf_mngt_cache_config cfg; + struct ocf_mngt_cache_device_config device_cfg; + + CAS_DEBUG_TRACE(); + + cache_path = kzalloc(sizeof(*cache_path) * MAX_STR_LEN, GFP_KERNEL); + if (cache_path == NULL) { + result = -OCF_ERR_NO_MEM; + return result; + } + + result = cas_properties_get_uint(cache_props, UPGRADE_IFACE_VERSION_STR, + &version); + if (result) + goto error; + + result = cas_properties_get_uint(cache_props, CACHE_ID_STR, cache_id); + if (result) + goto error; + + result = cas_properties_get_string(cache_props, CACHE_PATH_STR, + cache_path, MAX_STR_LEN); + if (result) + goto error; + + result = cas_properties_get_uint(cache_props, CACHE_TYPE_STR, + &cache_type); + if (result) + goto error; + + result = cas_properties_get_uint(cache_props, 
CACHE_LINE_SIZE_STR, + &cache_line_size); + if (result) + goto error; + + result = cas_properties_get_uint(cache_props, CACHE_MODE_STR, + &cache_mode); + if (result) + goto error; + + if (cache_mode >= ocf_cache_mode_max) + cache_mode = ocf_cache_mode_default; + + memset(&cfg, 0, sizeof(cfg)); + memset(&device_cfg, 0, sizeof(device_cfg)); + + cfg.id = *cache_id; + cfg.cache_mode = cache_mode; + /* cfg.eviction_policy = TODO */ + cfg.cache_line_size = cache_line_size; + cfg.metadata_layout = metadata_layout; + cfg.pt_unaligned_io = !unaligned_io; + cfg.use_submit_io_fast = !use_io_scheduler; + cfg.locked = true; + cfg.metadata_volatile = false; + + cfg.backfill.max_queue_size = max_writeback_queue_size; + cfg.backfill.queue_unblock_size = writeback_queue_unblock_size; + + device_cfg.uuid.data = cache_path; + device_cfg.uuid.size = strnlen(cache_path, MAX_STR_LEN) + 1; + device_cfg.volume_type = cache_type; + device_cfg.cache_line_size = cache_line_size; + device_cfg.perform_test = true; + device_cfg.force = false; + + result = cache_mng_init_instance(&cfg, &device_cfg, NULL); + +error: + kfree(cache_path); + return result; +} + +static int _cas_upgrade_restore_conf_core(struct cas_properties *cache_props, + ocf_cache_t cache) +{ + int result = 0; + unsigned long i = 0; + uint64_t core_id, core_no, version; + ocf_core_id_t core_id_int; + + char *core_path = NULL; + char *key = NULL; + struct ocf_mngt_core_config cfg = {}; + + CAS_DEBUG_TRACE(); + + key = kmalloc(sizeof(*key) * MAX_STR_LEN, GFP_KERNEL); + if (key == NULL) { + result = -OCF_ERR_NO_MEM; + return result; + } + + core_path = kzalloc(sizeof(*core_path) * MAX_STR_LEN, GFP_KERNEL); + if (core_path == NULL) { + kfree(key); + result = -OCF_ERR_NO_MEM; + return result; + } + + result = cas_properties_get_uint(cache_props, UPGRADE_IFACE_VERSION_STR, + &version); + if (result) + goto error; + + result = cas_properties_get_uint(cache_props, CORE_NO_STR, &core_no); + if (result) + goto error; + + for (i = 1; i < 
core_no + 1; i++) { + result = snprintf(key, MAX_STR_LEN, CORE_PATH_STR, i); + if (result < 0) + goto error; + + result = cas_properties_get_string(cache_props, key, + core_path, MAX_STR_LEN); + if (result) + goto error; + + result = snprintf(key, MAX_STR_LEN, CORE_ID_STR, i); + if (result < 0) + goto error; + + result = cas_properties_get_uint(cache_props, key, &core_id); + if (result) + goto error; + + core_id_int = core_id; + + cfg.try_add = 0; + cfg.volume_type = BLOCK_DEVICE_VOLUME; + cfg.core_id = core_id_int; + cfg.cache_id = ocf_cache_get_id(cache); + cfg.uuid.data = core_path; + cfg.uuid.size = strnlen(core_path, MAX_STR_LEN) + 1; + + result = cache_mng_add_core_to_cache(&cfg, NULL); + if (result) + goto error; + } + +error: + kfree(key); + kfree(core_path); + return result; +} + +static int _cas_upgrade_restore_conf_flush(struct cas_properties *cache_props, + ocf_cache_t cache) +{ + ocf_cache_id_t cache_id = ocf_cache_get_id(cache); + uint64_t cleaning_type; + uint64_t alru_thread_wakeup_time = OCF_ALRU_DEFAULT_WAKE_UP; + uint64_t alru_stale_buffer_time = OCF_ALRU_DEFAULT_STALENESS_TIME; + uint64_t alru_flush_max_buffers = OCF_ALRU_DEFAULT_FLUSH_MAX_BUFFERS; + uint64_t alru_activity_threshold = OCF_ALRU_DEFAULT_ACTIVITY_THRESHOLD; + uint64_t acp_thread_wakeup_time = OCF_ACP_DEFAULT_WAKE_UP; + uint64_t acp_flush_max_buffers = OCF_ACP_DEFAULT_FLUSH_MAX_BUFFERS; + uint64_t version; + int result = 0; + + CAS_DEBUG_TRACE(); + + result = cas_properties_get_uint(cache_props, UPGRADE_IFACE_VERSION_STR, + &version); + if (result) + return result; + + result = cas_properties_get_uint(cache_props, + CLEANING_POLICY_STR, &cleaning_type); + if (result) + return result; + + if (cleaning_type >= ocf_cleaning_max) + cleaning_type = ocf_cleaning_default; + + /* + * CLEANING_ALRU_WAKEUP_TIME PARAM + */ + + result = cas_properties_get_uint(cache_props, + CLEANING_ALRU_WAKEUP_TIME_STR, + &alru_thread_wakeup_time); + if (result) + return result; + + /* + * 
CLEANING_ALRU_STALENESS_TIME PARAM + */ + + result = cas_properties_get_uint(cache_props, + CLEANING_ALRU_STALENESS_TIME_STR, + &alru_stale_buffer_time); + if (result) + return result; + + /* + * CLEANING_ALRU_MAX_BUFFERS PARAM + */ + + result = cas_properties_get_uint(cache_props, + CLEANING_ALRU_MAX_BUFFERS_STR, + &alru_flush_max_buffers); + if (result) + return result; + + /* + * CLEANING_ALRU_TRESHOLD PARAM + */ + + result = cas_properties_get_uint(cache_props, + CLEANING_ALRU_TRESHOLD_STR, + &alru_activity_threshold); + if (result) + return result; + + /* + * CLEANING_ACP_WAKEUP_TIME PARAM + */ + + result = cas_properties_get_uint(cache_props, + CLEANING_ACP_WAKEUP_TIME_STR, + &acp_thread_wakeup_time); + if (result) + return result; + + /* + * CLEANING_ACP_MAX_BUFFERS PARAM + */ + + result = cas_properties_get_uint(cache_props, + CLEANING_ACP_MAX_BUFFERS_STR, + &acp_flush_max_buffers); + if (result) + return result; + + result |= cache_mng_set_cleaning_policy(cache_id, cleaning_type); + result |= cache_mng_set_cleaning_param(cache_id, ocf_cleaning_alru, + ocf_alru_wake_up_time, alru_thread_wakeup_time); + result |= cache_mng_set_cleaning_param(cache_id, ocf_cleaning_alru, + ocf_alru_stale_buffer_time, alru_stale_buffer_time); + result |= cache_mng_set_cleaning_param(cache_id, ocf_cleaning_alru, + ocf_alru_flush_max_buffers, alru_flush_max_buffers); + result |= cache_mng_set_cleaning_param(cache_id, ocf_cleaning_alru, + ocf_alru_activity_threshold, alru_activity_threshold); + result |= cache_mng_set_cleaning_param(cache_id, ocf_cleaning_acp, + ocf_acp_wake_up_time, acp_thread_wakeup_time); + result |= cache_mng_set_cleaning_param(cache_id, ocf_cleaning_acp, + ocf_acp_flush_max_buffers, acp_flush_max_buffers); + + return result; +} + +static int _cas_upgrade_restore_conf_io_class( + struct cas_properties *cache_props, ocf_cache_t cache) +{ + int result = 0; + unsigned long i = 0; + uint64_t io_class_no, min_size, max_size, priority, cache_mode, part_id; + char 
*name = NULL; + char *key = NULL; + struct kcas_io_classes *cfg; + + CAS_DEBUG_TRACE(); + + key = kzalloc(sizeof(*key) * MAX_STR_LEN, GFP_KERNEL); + if (key == NULL) { + result = -OCF_ERR_NO_MEM; + return result; + } + + name = kzalloc(sizeof(*name) * MAX_STR_LEN, GFP_KERNEL); + if (name == NULL) { + kfree(key); + result = -OCF_ERR_NO_MEM; + return result; + } + + cfg = kzalloc(KCAS_IO_CLASSES_SIZE, GFP_KERNEL); + if (cfg == NULL) { + kfree(key); + kfree(name); + result = -OCF_ERR_NO_MEM; + return result; + } + + cfg->cache_id = ocf_cache_get_id(cache); + + result = cas_properties_get_uint(cache_props, IO_CLASS_NO_STR, + &io_class_no); + if (result) + goto error_after_alloc_buffers; + + for (i = 1; i < io_class_no + 1; i++) { + result = snprintf(key, MAX_STR_LEN, IO_CLASS_NAME_STR, i); + if (result < 0) + goto error_after_alloc_buffers; + + result = cas_properties_get_string(cache_props, key, name, + MAX_STR_LEN); + if (result) + goto error_after_alloc_buffers; + + result = snprintf(key, MAX_STR_LEN, IO_CLASS_ID_STR, i); + if (result < 0) + goto error_after_alloc_buffers; + + result = cas_properties_get_uint(cache_props, key, &part_id); + if (result) + goto error_after_alloc_buffers; + + result = snprintf(key, MAX_STR_LEN, IO_CLASS_MIN_STR, i); + if (result < 0) + goto error_after_alloc_buffers; + + result = cas_properties_get_uint(cache_props, key, &min_size); + if (result) + goto error_after_alloc_buffers; + + result = snprintf(key, MAX_STR_LEN, IO_CLASS_MAX_STR, i); + if (result < 0) + goto error_after_alloc_buffers; + + result = cas_properties_get_uint(cache_props, key, &max_size); + if (result) + goto error_after_alloc_buffers; + + result = snprintf(key, MAX_STR_LEN, IO_CLASS_PRIO_STR, i); + if (result < 0) + goto error_after_alloc_buffers; + + result = cas_properties_get_uint(cache_props, key, &priority); + if (result) + goto error_after_alloc_buffers; + + result = snprintf(key, MAX_STR_LEN, IO_CLASS_CACHE_MODE_STR, i); + if (result < 0) + goto 
error_after_alloc_buffers; + + result = cas_properties_get_uint(cache_props, key, &cache_mode); + if (result) + goto error_after_alloc_buffers; + + result = env_strncpy(cfg->info[part_id].name, OCF_IO_CLASS_NAME_MAX, + name, OCF_IO_CLASS_NAME_MAX); + if (result) + goto error_after_alloc_buffers; + + cfg->info[part_id].priority = (int16_t)priority; + cfg->info[part_id].cache_mode = (ocf_cache_mode_t)cache_mode; + cfg->info[part_id].max_size = (uint32_t)max_size; + cfg->info[part_id].min_size = (uint32_t)min_size; + } + + result = cache_mng_set_partitions(cfg); + +error_after_alloc_buffers: + kfree(key); + kfree(name); + kfree(cfg); + return result; +} + +static int _cas_upgrade_restore_cache(struct cas_properties *cache_props) +{ + int result = 0; + uint64_t cache_id; + ocf_cache_t cache; + + CAS_DEBUG_TRACE(); + + result = _cas_upgrade_restore_conf_main(cache_props, &cache_id); + if (result) + return result; + + result = ocf_mngt_cache_get_by_id(cas_ctx, cache_id, &cache); + if (result) + return result; + + result = _cas_upgrade_restore_conf_core(cache_props, cache); + if (result) + goto error; + + result = _cas_upgrade_restore_conf_flush(cache_props, cache); + if (result) + goto error; + + result = _cas_upgrade_restore_conf_io_class(cache_props, cache); + if (result) + goto error; + +error: + ocf_mngt_cache_put(cache); + return result; +} + +int _cas_upgrade_restore_cache_mode_visitor(ocf_core_t core, void *cntx) +{ + int *result = (int*) cntx; + ocf_volume_t vol; + + vol = ocf_core_get_volume(core); + *result = casdisk_functions.casdsk_disk_clear_pt(bd_object(vol)->dsk); + + return *result; +} + +static int _cas_upgrade_restore_cache_mode(struct cas_properties *cache_props) +{ + int result = 0; + uint64_t cache_id, cache_mode; + ocf_cache_t cache; + + CAS_DEBUG_TRACE(); + + result = cas_properties_get_uint(cache_props, CACHE_ID_STR, &cache_id); + if (result) + return result; + + result = ocf_mngt_cache_get_by_id(cas_ctx, cache_id, &cache); + if (result) + return 
result; + + result = cas_properties_get_uint(cache_props, CACHE_MODE_STR, + &cache_mode); + if (result) + goto error; + + if (ocf_cache_get_mode(cache) != cache_mode) { + result = cache_mng_set_cache_mode(ocf_cache_get_id(cache), + cache_mode, false); + if (result) + goto error; + + result |= ocf_core_visit(cache, + _cas_upgrade_restore_cache_mode_visitor, &result, true); + } + +error: + ocf_mngt_cache_put(cache); + return result; +} + +static int _cas_upgrade_restore_cache_after_error( + struct cas_properties *cache_props) +{ + int result = 0; + uint64_t cache_id; + ocf_cache_t cache = NULL; + + CAS_DEBUG_TRACE(); + + result = cas_properties_get_uint(cache_props, CACHE_ID_STR, &cache_id); + if (result) + return result; + + result = ocf_mngt_cache_get_by_id(cas_ctx, cache_id, &cache); + if (result == -OCF_ERR_CACHE_NOT_EXIST) { + result = _cas_upgrade_restore_cache(cache_props); + } else if (result == 0) { + result = _cas_upgrade_restore_cache_mode(cache_props); + ocf_mngt_cache_put(cache); + } + + return result; +} + +static int _cas_upgrade_restore_configuration( + struct casdsk_props_conf *caches_props_serialized_array, + size_t caches_no, restore_callback_t restore_callback) +{ + int result = 0, i = 0; + struct cas_properties **caches_props_array = NULL; + + CAS_DEBUG_TRACE(); + + caches_props_array = kcalloc(caches_no, sizeof(*caches_props_array), + GFP_KERNEL); + if (!caches_props_array) { + result = -OCF_ERR_NO_MEM; + return result; + } + + for (i = 0; i < caches_no; i++) { + caches_props_array[i] = cas_properites_parse( + &caches_props_serialized_array[i]); + if (IS_ERR(caches_props_array[i])) { + result = PTR_ERR(caches_props_array[i]); + break; + } + + if (caches_props_array[i]) { +#if 1 == CAS_UPGRADE_DEBUG + cas_properties_print(caches_props_array[i]); +#endif + result = restore_callback(caches_props_array[i]); + if (result) { + cas_properties_print(caches_props_array[i]); + break; + } + } + } + + _cas_upgrade_destroy_props_array(caches_props_array, 
caches_no); + kfree(caches_props_array); + return result; +} + +struct casdsk_props_conf *caches_serialized_conf_init; +size_t caches_no_init; + +int cas_upgrade_get_configuration(void) +{ + int result = 0; + struct casdsk_props_conf *buffer = NULL; + + CAS_DEBUG_TRACE(); + + caches_no_init = casdisk_functions.casdsk_get_stored_config(&buffer); + if (caches_no_init == 0 || !buffer) + return -KCAS_ERR_NO_STORED_CONF; + + _cas_upgrade_set_state(); + + caches_serialized_conf_init = buffer; + + return result; +} + +int cas_upgrade_check_ctx_visitor(ocf_cache_t cache, void *cntx) +{ + int result = ocf_cache_is_incomplete(cache); + + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Upgrade error. Cannot start upgrade in flight" + " cache %d is in incomplete state\n", + ocf_cache_get_id(cache)); + } + + return result; +} + +static int _cas_cache_attached_check_visitor(ocf_cache_t cache, void *cntx) +{ + if (!ocf_cache_is_device_attached(cache)) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Upgrade error. Cannot start upgrade in flight" + " when cache drive is detached!\n"); + return 1; + } + + return 0; +} + +static int _cas_upgrade_check_ctx_state(void) +{ + if (ocf_mngt_core_pool_get_count(cas_ctx)) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Upgrade error. 
Cannot start upgrade in flight" + " when core pool list is not empty\n"); + return -KCAS_ERR_CORE_POOL_NOT_EMPTY; + } + + if (ocf_mngt_cache_visit(cas_ctx ,_cas_cache_attached_check_visitor, + NULL)) { + return -KCAS_ERR_NO_CACHE_ATTACHED; + } + + if (ocf_mngt_cache_visit(cas_ctx, cas_upgrade_check_ctx_visitor, + NULL)) { + return -OCF_ERR_CACHE_IN_INCOMPLETE_STATE; + } + + return 0; +} + +int cas_upgrade(void) +{ + int result = 0, result_rollback = 0; + restore_callback_t restore_callback = NULL; + + CAS_DEBUG_TRACE(); + + result = _cas_upgrade_check_ctx_state(); + if (result) + return result; + + _cas_upgrade_set_state(); + + result = _cas_upgrade_dump_conf(); + if (result) + goto dump_err; + + result = _cas_upgrade_set_pt_and_flush(); + if (result) { + restore_callback = _cas_upgrade_restore_cache_mode; + goto upgrade_err; + } + + result = _cas_upgrade_stop_devices(); + if (result) { + restore_callback = _cas_upgrade_restore_cache_after_error; + goto upgrade_err; + } + + return 0; + +upgrade_err: + printk(KERN_ERR OCF_PREFIX_SHORT "Upgrade error. Start rollback"); + result_rollback = cas_upgrade_get_configuration(); + if (result_rollback != -KCAS_ERR_NO_STORED_CONF) { + result_rollback = _cas_upgrade_restore_configuration( + caches_serialized_conf_init, caches_no_init, + restore_callback); + } else { + /* nothing to rool back - that's good */ + result_rollback = 0; + } + if (result_rollback) { + /* rollback error */ + /* TODO: FIXME this path loses information about original cache + mode if we managed to switch to PT - configuration stored in + inteldisk will be freed before returning from this function. 
+ */ + result = -KCAS_ERR_ROLLBACK; + } + + casdisk_functions.casdsk_free_stored_config(); + +dump_err: + _cas_upgrade_clear_state(); + return result; +} + +int cas_upgrade_finish(void) +{ + int result = 0, rollback_result = 0; + + CAS_DEBUG_TRACE(); + + result = _cas_upgrade_restore_configuration(caches_serialized_conf_init, + caches_no_init, _cas_upgrade_restore_cache); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during restoring configuration\n"); + rollback_result = _cas_upgrade_set_pt_and_flush(); + if (rollback_result) + result = rollback_result; + + rollback_result = _cas_upgrade_stop_devices(); + if (rollback_result) + result = rollback_result; + } else { + /* + * Remove configuration only in case when restoring finished + * successfully + */ + casdisk_functions.casdsk_free_stored_config(); + } + + _cas_upgrade_clear_state(); + + return result; +} + +static int _cas_upgrade_restore_noop(struct cas_properties *cache_props) +{ + return 0; +} + +int cas_upgrade_verify(void) +{ + int result = 0; + + CAS_DEBUG_TRACE(); + + result = _cas_upgrade_restore_configuration(caches_serialized_conf_init, + caches_no_init, + _cas_upgrade_restore_noop); + + return result; +} diff --git a/modules/cas_cache/layer_upgrade.h b/modules/cas_cache/layer_upgrade.h new file mode 100644 index 000000000..fbf5a03b0 --- /dev/null +++ b/modules/cas_cache/layer_upgrade.h @@ -0,0 +1,46 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __LAYER_UPGRADE_H + +#define __LAYER_UPGRADE_H + +#include "cas_cache/cas_cache.h" + +extern bool in_upgrade; + +/** + * @brief Check that CAS is in upgarde state + * @return true if is or false if isn't + */ +bool cas_upgrade_is_in_upgrade(void); + +/** + * @brief Check that caches configuration is stored at casdsk + * @return 0 if exist + */ +int cas_upgrade_get_configuration(void); + +/** + * @brief Start upgrade in flight procedure, dump configuration, + * switch caches to PT 
and close caches + * @return result + */ +int cas_upgrade(void); + +/** + * @brief Finish upgrade in new CAS module - restore all caches + * @return result of restoring + */ +int cas_upgrade_finish(void); + +/** + * @brief Try to parse configuration stored in casdisk + * @return result of verification + */ +int cas_upgrade_verify(void); + +#endif /* __LAYER_UPGRADE_H */ + diff --git a/modules/cas_cache/linux_kernel_version.h b/modules/cas_cache/linux_kernel_version.h new file mode 100644 index 000000000..da2c7981e --- /dev/null +++ b/modules/cas_cache/linux_kernel_version.h @@ -0,0 +1,624 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __LINUX_KERNEL_VERSION_H__ +#define __LINUX_KERNEL_VERSION_H__ + +/* Libraries. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_SLAB +#include +#endif + +#if LINUX_VERSION_CODE > KERNEL_VERSION(3, 0, 0) + #include + #ifdef UTS_UBUNTU_RELEASE_ABI + #define CAS_UBUNTU + #endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) + #error Unsupported Linux Kernel Version +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) + #define FILE_INODE(file) file->f_inode +#else + #define FILE_INODE(file) file->f_dentry->d_inode +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 76) + #define DENTRY_ALIAS_HEAD(dentry) (dentry)->d_u.d_alias + #define ALIAS_NODE_TO_DENTRY(alias) container_of(alias, struct dentry, d_u.d_alias) +#else + #define DENTRY_ALIAS_HEAD(dentry) (dentry)->d_alias + #define ALIAS_NODE_TO_DENTRY(alias) container_of(alias, struct dentry, d_alias) +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) + #define ALIAS_NODE_TYPE struct hlist_node + #define 
DENTRY_LIST_EMPTY(head) hlist_empty(head) + #define INODE_FOR_EACH_DENTRY(pos, head) hlist_for_each(pos, head) +#else + #define DENTRY_LIST_EMPTY(head) list_empty(head) + #define ALIAS_NODE_TYPE struct list_head + #define INODE_FOR_EACH_DENTRY(pos, head) list_for_each(pos, head) +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) + #define BIO_OP_STATUS(bio) bio->bi_status +#else + #define BIO_OP_STATUS(bio) bio->bi_error +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0) + #define BIO_ENDIO(BIO, BYTES_DONE, ERROR) \ + ({ BIO_OP_STATUS(BIO) = ERROR; bio_endio(BIO); }) +#else + #define BIO_ENDIO(BIO, BYTES_DONE, ERROR) bio_endio(BIO, ERROR) +#endif + +#define REFER_BLOCK_CALLBACK(name) name##_callback +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) + #define DECLARE_BLOCK_CALLBACK(name, BIO, BYTES_DONE, ERROR) \ + void name##_callback(BIO, ERROR) + #define BLOCK_CALLBACK_INIT(BIO) {; } + #define BLOCK_CALLBACK_RETURN() { return; } + #define BLOCK_CALLBACK_ERROR(BIO, ERROR) ERROR +#else + #define DECLARE_BLOCK_CALLBACK(name, BIO, BYTES_DONE, ERROR) \ + void name##_callback(BIO) + #define BLOCK_CALLBACK_INIT(BIO) {; } + #define BLOCK_CALLBACK_RETURN() { return; } + #define BLOCK_CALLBACK_ERROR(BIO, ERROR) BIO_OP_STATUS(BIO) +#endif + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 37) + #define OPEN_BDEV_EXCLUSIVE(PATH, FMODE, HOLDER) \ + blkdev_get_by_path(PATH, (FMODE_EXCL | FMODE), HOLDER) + #define CLOSE_BDEV_EXCLUSIVE(BDEV, FMODE) \ + blkdev_put(BDEV, (FMODE_EXCL | FMODE)) +#else + #define OPEN_BDEV_EXCLUSIVE(PATH, FMODE, HOLDER) \ + open_bdev_exclusive(PATH, FMODE, HOLDER) + #define CLOSE_BDEV_EXCLUSIVE(BDEV, FMODE) \ + close_bdev_exclusive(BDEV, FMODE) +#endif + +#ifdef CAS_UBUNTU + #define LOOKUP_BDEV(PATH) lookup_bdev(PATH, 0) +#else + #define LOOKUP_BDEV(PATH) lookup_bdev(PATH) +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0) || defined CAS_SLES12SP3 + #define BIO_OP_FLAGS_FORMAT "0x%016X" + #define BIO_OP_FLAGS(bio) 
(bio)->bi_opf +#else + #define BIO_OP_FLAGS_FORMAT "0x%016lX" + #define BIO_OP_FLAGS(bio) (bio)->bi_rw +#endif + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 32) + #define BIO_RW_FLAGS ((1U << BIO_RW_UNPLUG) | \ + (1U << BIO_RW_NOIDLE) | (1U << BIO_RW_SYNCIO)) + #define BIO_SET_RW_FLAGS(bio) BIO_OP_FLAGS((bio)) |= BIO_RW_FLAGS +#else + #define BIO_RW_FLAGS 0 + #define BIO_SET_RW_FLAGS(bio) +#endif + +#if defined RQF_SOFTBARRIER + #define CHECK_BARRIER(bio) ((BIO_OP_FLAGS(bio) & RQF_SOFTBARRIER) != 0) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 1) + #define CHECK_BARRIER(bio) ((BIO_OP_FLAGS(bio) & REQ_SOFTBARRIER) != 0) +#else + #define CHECK_BARRIER(bio) (bio_rw_flagged((bio), BIO_RW_BARRIER)) +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0) || defined CAS_SLES12SP3 + #define RQ_DATA_DIR(rq) rq_data_dir(rq) + #define RQ_DATA_DIR_WR WRITE +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34) + #define RQ_DATA_DIR(rq) rq_data_dir(rq) + #define RQ_DATA_DIR_WR REQ_WRITE +#else + #define RQ_DATA_DIR(rq) rq_data_dir(rq) + #define RQ_DATA_DIR_WR WRITE +#endif + +#if defined REQ_PREFLUSH + #define CAS_REQ_FLUSH REQ_PREFLUSH + #define CAS_FLUSH_SUPPORTED +#elif defined REQ_FLUSH + #define CAS_REQ_FLUSH REQ_FLUSH + #define CAS_FLUSH_SUPPORTED +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0) || defined CAS_SLES12SP3 +#define CHECK_QUEUE_FLUSH(q) test_bit(QUEUE_FLAG_WC, &(q)->queue_flags) +#define CHECK_QUEUE_FUA(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags) + +static inline void cas_set_queue_flush_fua(struct request_queue *q, + bool flush, bool fua) +{ + blk_queue_write_cache(q, flush, fua); +} + +#else +#define CHECK_QUEUE_FLUSH(q) ((q)->flush_flags & CAS_REQ_FLUSH) +#define CHECK_QUEUE_FUA(q) ((q)->flush_flags & REQ_FUA) + +static inline void cas_set_queue_flush_fua(struct request_queue *q, + bool flush, bool fua) +{ + unsigned int flags = 0; + if (flush) + flags |= CAS_REQ_FLUSH; + if (fua) + flags |= REQ_FUA; + if (flags) + 
blk_queue_flush(q, flags); +} +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) + + #ifdef WRITE_FLUSH + #define RQ_IS_FLUSH(rq) ((rq)->cmd_flags & CAS_REQ_FLUSH) + #ifdef BIO_FLUSH + #define CAS_IS_WRITE_FLUSH(flags) ((flags) & BIO_FLUSH) + #else + #define CAS_IS_WRITE_FLUSH(flags) \ + ((flags) & CAS_REQ_FLUSH) + #endif + + #define OCF_WRITE_FLUSH WRITE_FLUSH + #elif defined REQ_PREFLUSH + #define RQ_IS_FLUSH(rq) ((rq)->cmd_flags & REQ_PREFLUSH) + #define OCF_WRITE_FLUSH (REQ_OP_WRITE | REQ_PREFLUSH) + #define CAS_IS_WRITE_FLUSH(flags) \ + (OCF_WRITE_FLUSH == ((flags) & OCF_WRITE_FLUSH)) + #else + #define RQ_IS_FLUSH(rq) 0 + #define CAS_IS_WRITE_FLUSH(flags) \ + (WRITE_BARRIER == ((flags) & WRITE_BARRIER)) + #define OCF_WRITE_FLUSH WRITE_BARRIER + #endif /* #ifdef WRITE_FLUSH */ + + #ifdef WRITE_FLUSH_FUA + #define OCF_WRITE_FLUSH_FUA WRITE_FLUSH_FUA + #ifdef BIO_FUA + #define CAS_IS_WRITE_FLUSH_FUA(flags) \ + ((BIO_FUA | BIO_FLUSH) == \ + ((flags) & (BIO_FUA | BIO_FLUSH))) + #else + #define CAS_IS_WRITE_FLUSH_FUA(flags) \ + ((REQ_FUA | CAS_REQ_FLUSH) == \ + ((flags) & (REQ_FUA | CAS_REQ_FLUSH))) + #endif + + #elif defined REQ_PREFLUSH + #define CAS_IS_WRITE_FLUSH_FUA(flags) \ + ((REQ_PREFLUSH | REQ_FUA) == \ + ((flags) & (REQ_PREFLUSH |REQ_FUA))) + #define OCF_WRITE_FLUSH_FUA (REQ_PREFLUSH | REQ_FUA) + #else + #define CAS_IS_WRITE_FLUSH_FUA(flags) 0 + #define OCF_WRITE_FLUSH_FUA WRITE_BARRIER + #endif /* #ifdef WRITE_FLUSH_FUA */ + + #ifdef WRITE_FUA + #ifdef BIO_FUA + #define CAS_IS_WRITE_FUA(flags) ((flags) & BIO_FUA) + #else + #define CAS_IS_WRITE_FUA(flags) ((flags) & REQ_FUA) + #endif + #define OCF_WRITE_FUA WRITE_FUA + #elif defined REQ_FUA + #define CAS_IS_WRITE_FUA(flags) ((flags) & REQ_FUA) + #define OCF_WRITE_FUA REQ_FUA + #else + #define CAS_IS_WRITE_FUA(flags) 0 + #define OCF_WRITE_FUA WRITE_BARRIER + #endif /* #ifdef WRITE_FUA */ + +#endif /* #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */ + +#if LINUX_VERSION_CODE <= 
KERNEL_VERSION(3, 7, 9) + #define DAEMONIZE(name, arg...) daemonize(name, ##arg) +#else + #define DAEMONIZE(name, arg...) do { } while (0) +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) + #define SET_QUEUE_CHUNK_SECTORS(queue, chunk_size) \ + queue->limits.chunk_sectors = chunk_size; +#else + #define SET_QUEUE_CHUNK_SECTORS(queue, chunk_size) {; } +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) + #define BIO_BISIZE(bio) bio->bi_size + #define BIO_BIIDX(bio) bio->bi_idx + #define BIO_BISECTOR(bio) bio->bi_sector +#else + #define BIO_BISIZE(bio) bio->bi_iter.bi_size + #define BIO_BISECTOR(bio) bio->bi_iter.bi_sector + #define BIO_BIIDX(bio) bio->bi_iter.bi_idx +#endif + +#ifdef CAS_SLES12SP3 + #define CAS_IS_DISCARD(bio) \ + (((BIO_OP_FLAGS(bio)) & REQ_OP_MASK) == REQ_OP_DISCARD) + #define CAS_BIO_DISCARD \ + ((REQ_OP_WRITE | REQ_OP_DISCARD)) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) + #define CAS_IS_DISCARD(bio) \ + (bio_op(bio) == REQ_OP_DISCARD) + #define CAS_BIO_DISCARD \ + (REQ_OP_DISCARD) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0) + #define CAS_IS_DISCARD(bio) \ + ((BIO_OP_FLAGS(bio)) & REQ_OP_DISCARD) + #define CAS_BIO_DISCARD \ + ((REQ_OP_WRITE | REQ_OP_DISCARD)) +#elif LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32) + #define CAS_IS_DISCARD(bio) ((BIO_OP_FLAGS(bio)) & REQ_DISCARD) + #define CAS_BIO_DISCARD (REQ_WRITE | REQ_DISCARD) +#else + #define CAS_IS_DISCARD(bio) ((BIO_OP_FLAGS(bio)) & (1 << BIO_RW_DISCARD)) + #define CAS_BIO_DISCARD ((1 << BIO_RW) | (1 << BIO_RW_DISCARD)) +#endif + +#include + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32) +#include +static inline unsigned long cas_vm_mmap(struct file *file, + unsigned long addr, unsigned long len) +{ + return vm_mmap(file, addr, len, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, 0); +} + +static inline int cas_vm_munmap(unsigned long start, size_t len) +{ + return vm_munmap(start, len); +} +#else +#include +static inline unsigned long 
cas_vm_mmap(struct file *file, + unsigned long addr, unsigned long len) +{ + return do_mmap_pgoff(file, addr, len, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, 0); +} + +static inline int cas_vm_munmap(unsigned long start, size_t len) +{ + return do_munmap(current->mm, start, len); +} +#endif + +/* + * For 8KB process kernel stack check if request is not continous and + * submit each bio as separate request. This prevent nvme driver from + * splitting requests. + * For large requests, nvme splitting causes stack overrun. + */ +#if THREAD_SIZE <= 8192 + #define RQ_CHECK_CONTINOUS +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + #define SEGMENT_BVEC(vec) (&(vec)) +#else + #define SEGMENT_BVEC(vec) (vec) +#endif + +#ifndef SHRT_MIN + #define SHRT_MIN ((s16)-32768) +#endif + +#ifndef SHRT_MAX + #define SHRT_MAX ((s16)32767) +#endif + +#define ENOTSUP ENOTSUPP + +#ifdef RHEL_RELEASE_VERSION + #if RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7, 3) + #define CAS_RHEL_73 + #endif +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) || defined CAS_SLES12SP3 +static inline blk_qc_t cas_submit_bio(int rw, struct bio *bio) +{ + BIO_OP_FLAGS(bio) |= rw; + return submit_bio(bio); +} +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +static inline blk_qc_t cas_submit_bio(int rw, struct bio *bio) +{ + return submit_bio(rw, bio); +} +#else +static inline void cas_submit_bio(int rw, struct bio *bio) +{ + submit_bio(rw, bio); +} +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +#define cas_blk_rq_set_block_pc(rq) {} +#else +#define cas_blk_rq_set_block_pc(rq) blk_rq_set_block_pc(rq) +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) +#define cas_blk_queue_bounce(q, bounce_bio) ({}) +#else +#define cas_blk_queue_bounce(q, bounce_bio) blk_queue_bounce(q, bounce_bio) +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 11) +#define cas_blk_rq_append_bio(rq, bounce_bio) blk_rq_append_bio(rq, &bounce_bio) +#else +#define 
cas_blk_rq_append_bio(rq, bounce_bio) blk_rq_append_bio(rq, bounce_bio) +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0) || defined CAS_SLES12SP3 +static inline struct request *cas_blk_make_request(struct request_queue *q, + struct bio *bio, gfp_t gfp_mask) +{ + struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); + + if (IS_ERR(rq)) + return rq; + + cas_blk_rq_set_block_pc(rq); + rq->q = q; + + for_each_bio(bio) { + struct bio *bounce_bio = bio; + int ret; + + cas_blk_queue_bounce(q, &bounce_bio); + ret = cas_blk_rq_append_bio(rq, bounce_bio); + if (unlikely(ret)) { + blk_put_request(rq); + return ERR_PTR(ret); + } + } + + return rq; +} +#else +static inline struct request *cas_blk_make_request(struct request_queue *q, + struct bio *bio, gfp_t gfp_mask) +{ + return blk_make_request(q, bio, gfp_mask); +} +#endif + +#ifdef CAS_RHEL_73 +static inline void cas_copy_queue_limits(struct request_queue *exp_q, + struct request_queue *cache_q, struct request_queue *core_q) +{ + struct queue_limits_aux *l_aux = exp_q->limits.limits_aux; + + exp_q->limits = cache_q->limits; + exp_q->limits.limits_aux = l_aux; + if (exp_q->limits.limits_aux && cache_q->limits.limits_aux) + *exp_q->limits.limits_aux = *cache_q->limits.limits_aux; + + exp_q->limits.max_sectors = core_q->limits.max_sectors; + exp_q->limits.max_hw_sectors = core_q->limits.max_hw_sectors; + exp_q->limits.max_segments = core_q->limits.max_segments; + exp_q->limits.max_write_same_sectors = 0; + + /* + * Workaround for RHEL/CentOS 7.3 bug in kernel. + * Merging implementation on blk-mq does not respec virt boundary + * restriction and front merges bios with non-zero offsets. + * This leads to request with gaps between bios and in consequence + * triggers BUG_ON() in nvme driver or silently corrupts data. + * To prevent this, disable merging on cache queue if there are + * requirements regarding virt boundary (marking bios with REQ_NOMERGE + * does not solve this problem). 
+ */ + if (queue_virt_boundary(cache_q)) + queue_flag_set(QUEUE_FLAG_NOMERGES, cache_q); + +} +#else +static inline void cas_copy_queue_limits(struct request_queue *exp_q, + struct request_queue *cache_q, struct request_queue *core_q) +{ + exp_q->limits = cache_q->limits; + + exp_q->limits.max_sectors = core_q->limits.max_sectors; + exp_q->limits.max_hw_sectors = core_q->limits.max_hw_sectors; + exp_q->limits.max_segments = core_q->limits.max_segments; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) || defined CAS_SLES12SP3 + exp_q->limits.max_write_same_sectors = 0; +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) || defined CAS_SLES12SP3 + exp_q->limits.max_write_zeroes_sectors = 0; +#endif +} +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0) + #define CAS_GARBAGE_COLLECTOR +#endif + +/* rate-limited printk */ +#define CAS_PRINT_RL(...) \ + if (printk_ratelimit()) \ + printk(__VA_ARGS__) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) +static inline void cas_generic_start_io_acct(struct request_queue *q, + int rw, unsigned long sectors, struct hd_struct *part) +{ + int cpu = part_stat_lock(); + + part_round_stats(cpu, part); + part_stat_inc(cpu, part, ios[rw]); + part_stat_add(cpu, part, sectors[rw], sectors); + part_inc_in_flight(part, rw); + + part_stat_unlock(); +} + +static inline void cas_generic_end_io_acct(struct request_queue *q, + int rw, struct hd_struct *part, unsigned long start_time) +{ + unsigned long duration = jiffies - start_time; + int cpu = part_stat_lock(); + + part_stat_add(cpu, part, ticks[rw], duration); + part_round_stats(cpu, part); + part_dec_in_flight(part, rw); + + part_stat_unlock(); +} +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +static inline void cas_generic_start_io_acct(struct request_queue *q, + int rw, unsigned long sectors, struct hd_struct *part) +{ + generic_start_io_acct(rw, sectors, part); +} + +static inline void cas_generic_end_io_acct(struct request_queue *q, + int rw, struct 
hd_struct *part, unsigned long start_time) +{ + generic_end_io_acct(rw, part, start_time); +} +#else +static inline void cas_generic_start_io_acct(struct request_queue *q, + int rw, unsigned long sectors, struct hd_struct *part) +{ + generic_start_io_acct(q, rw, sectors, part); +} + +static inline void cas_generic_end_io_acct(struct request_queue *q, + int rw, struct hd_struct *part, unsigned long start_time) +{ + generic_end_io_acct(q, rw, part, start_time); +} +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) +static inline unsigned long cas_global_zone_page_state(enum zone_stat_item item) +{ + return global_zone_page_state(item); +} +#define CAS_BIO_SET_DEV(bio, bdev) bio_set_dev(bio, bdev) +#define CAS_BIO_GET_DEV(bio) bio->bi_disk +#else +static inline unsigned long cas_global_zone_page_state(enum zone_stat_item item) +{ + return global_page_state(item); +} +#define CAS_BIO_SET_DEV(bio, bdev) bio->bi_bdev = bdev +#define CAS_BIO_GET_DEV(bio) bio->bi_bdev->bd_disk +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33) + #define CAS_RATELIMIT(state, func_name) __ratelimit(state) +#else + #define CAS_RATELIMIT(state, func_name) ___ratelimit(state, func_name) +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) +static inline struct bio *cas_bio_clone(struct bio *bio, gfp_t gfp_mask) +{ + return bio_clone_fast(bio, gfp_mask, NULL); +} +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) +static inline struct bio *cas_bio_clone(struct bio *bio, gfp_t gfp_mask) +{ + return bio_clone_kmalloc(bio, gfp_mask); +} + #define CAS_BLK_STATUS_T blk_status_t +#else +static inline struct bio *cas_bio_clone(struct bio *bio, gfp_t gfp_mask) +{ + return bio_clone(bio, gfp_mask); +} + #define CAS_BLK_STATUS_T int +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +static inline int is_rq_type_fs(struct request *rq) +{ + switch (req_op(rq)){ + case REQ_OP_READ: + case REQ_OP_WRITE: + case REQ_OP_FLUSH: + case REQ_OP_DISCARD: + return true; + 
default: + return false; + } +} +#else +static inline int is_rq_type_fs(struct request *rq) +{ + return rq->cmd_type == REQ_TYPE_FS; +} +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) + #define CAS_SET_DISCARD_ZEROES_DATA(queue_limits, val) ({}) +#else + #define CAS_SET_DISCARD_ZEROES_DATA(queue_limits, val) \ + queue_limits.discard_zeroes_data = val +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + #define cas_queue_flag_set_unlocked(flag, request_queue) \ + blk_queue_flag_set(flag, request_queue) +#else + #define cas_queue_flag_set_unlocked(flag, request_queue) \ + queue_flag_set_unlocked(flag, request_queue) +#endif + +#endif /* #ifndef __LINUX_KERNEL_VERSION_H__ */ diff --git a/modules/cas_cache/main.c b/modules/cas_cache/main.c new file mode 100644 index 000000000..c46dd1d9c --- /dev/null +++ b/modules/cas_cache/main.c @@ -0,0 +1,210 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include "cas_cache.h" + +/* Layer information. */ +MODULE_AUTHOR("Intel(R) Corporation"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(CAS_VERSION); + +u32 max_writeback_queue_size = 65536; +module_param(max_writeback_queue_size, uint, (S_IRUSR | S_IRGRP)); +MODULE_PARM_DESC(max_writeback_queue_size, + "Max cache writeback queue size (65536)"); + +u32 writeback_queue_unblock_size = 60000; +module_param(writeback_queue_unblock_size, uint, (S_IRUSR | S_IRGRP)); +MODULE_PARM_DESC(writeback_queue_unblock_size, + "Cache writeback queue size (60000) at which queue " + "is unblocked when blocked"); + +u32 dry_run; +module_param(dry_run, uint, (S_IRUSR | S_IRGRP)); +MODULE_PARM_DESC(dry_run, "Perform dry run on module load"); + +u32 use_io_scheduler = 1; +module_param(use_io_scheduler, uint, (S_IRUSR | S_IRGRP)); +MODULE_PARM_DESC(use_io_scheduler, + "Configure how IO shall be handled. 
" + "0 - in make request function, 1 - in request function"); + +u32 metadata_layout = ocf_metadata_layout_default; +module_param(metadata_layout, uint, (S_IRUSR | S_IRGRP)); +MODULE_PARM_DESC(metadata_layout, "Metadata layout, 0 - striping, 1 - sequential"); + +u32 unaligned_io = 1; +module_param(unaligned_io, uint, (S_IRUSR | S_IRGRP)); +MODULE_PARM_DESC(unaligned_io, + "Define how to handle I/O requests unaligned to 4 kiB, " + "0 - apply PT, 1 - handle by cache"); + +u32 seq_cut_off_mb = 1; +module_param(seq_cut_off_mb, uint, (S_IRUSR | S_IRGRP)); +MODULE_PARM_DESC(seq_cut_off_mb, + "Sequential cut off threshold in MiB. 0 - disable"); + +/* globals */ +bool in_upgrade; +ocf_ctx_t cas_ctx; +struct casdsk_functions_mapper casdisk_functions; + +struct exported_symbol { + char *name; + unsigned long addr; +}; + +int static cas_find_symbol(void *data, const char *namebuf, + struct module *module, unsigned long kallsyms_addresses) +{ + struct exported_symbol *sym = data; + + if (strcmp(namebuf, sym->name) == 0) + sym->addr = kallsyms_addresses; + return 0; +} + +#define cas_lookup_symbol(f) ({ \ + struct exported_symbol sym = {#f, 0}; \ + kallsyms_on_each_symbol(&cas_find_symbol, &sym); \ + casdisk_functions.f = (void *)sym.addr; \ + if (!casdisk_functions.f) \ + return -EINVAL; \ +}) + +int static cas_casdisk_lookup_funtions(void) +{ + cas_lookup_symbol(casdsk_disk_dettach); + cas_lookup_symbol(casdsk_exp_obj_destroy); + cas_lookup_symbol(casdsk_exp_obj_create); + cas_lookup_symbol(casdsk_disk_get_queue); + cas_lookup_symbol(casdsk_store_config); + cas_lookup_symbol(casdsk_disk_get_blkdev); + cas_lookup_symbol(casdsk_exp_obj_get_queue); + cas_lookup_symbol(casdsk_get_version); + cas_lookup_symbol(casdsk_disk_close); + cas_lookup_symbol(casdsk_disk_claim); + cas_lookup_symbol(casdsk_exp_obj_unlock); + cas_lookup_symbol(casdsk_disk_set_pt); + cas_lookup_symbol(casdsk_get_stored_config); + cas_lookup_symbol(casdsk_disk_get_gendisk); + 
cas_lookup_symbol(casdsk_disk_attach); + cas_lookup_symbol(casdsk_disk_set_attached); + cas_lookup_symbol(casdsk_exp_obj_activate); + cas_lookup_symbol(casdsk_exp_obj_activated); + cas_lookup_symbol(casdsk_exp_obj_lock); + cas_lookup_symbol(casdsk_free_stored_config); + cas_lookup_symbol(casdsk_disk_open); + cas_lookup_symbol(casdsk_disk_clear_pt); + cas_lookup_symbol(casdsk_exp_obj_get_gendisk); + return 0; +} + +static int __init cas_init_module(void) +{ + int result = 0; + result = cas_casdisk_lookup_funtions(); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Could not find inteldisk functions.\n"); + return result; + } + + if (casdisk_functions.casdsk_get_version() != CASDSK_IFACE_VERSION) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Incompatible inteldisk module\n"); + return -EINVAL; + } + + if (!writeback_queue_unblock_size || !max_writeback_queue_size) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Invalid module parameter.\n"); + return -EINVAL; + } + + if (writeback_queue_unblock_size >= max_writeback_queue_size) { + printk(KERN_ERR OCF_PREFIX_SHORT + "parameter writeback_queue_unblock_size" + " must be less than max_writeback_queue_size\n"); + return -EINVAL; + } + + if (metadata_layout >= ocf_metadata_layout_max) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Invalid value for metadata_layout parameter\n"); + return -EINVAL; + } + + if (unaligned_io != 0 && unaligned_io != 1) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Invalid value for unaligned_io parameter\n"); + return -EINVAL; + } + + if (use_io_scheduler != 0 && use_io_scheduler != 1) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Invalid value for use_io_scheduler parameter\n"); + return -EINVAL; + } + + result = cas_initialize_context(); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Cannot initialize cache library\n"); + return result; + } + + result = cas_upgrade_get_configuration(); + if (-KCAS_ERR_NO_STORED_CONF == result) { + printk(KERN_INFO OCF_PREFIX_SHORT + "Not found configuration for upgrade. 
" + "Standard module initialization.\n"); + } else { + if (!dry_run) { + result = cas_upgrade_finish(); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during finish upgrade, " + "result: %d\n", result); + goto error_cas_ctx_init; + } + } else { + result = cas_upgrade_verify(); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Error during upgrade " + "verification\n"); + goto error_cas_ctx_init; + } + } + } + + result = cas_ctrl_device_init(); + if (result) { + printk(KERN_ERR OCF_PREFIX_SHORT + "Cannot initialize control device\n"); + goto error_cas_ctx_init; + } + + printk(KERN_INFO "%s Version %s (%s)::Module loaded successfully\n", + OCF_PREFIX_LONG, CAS_VERSION, CAS_KERNEL); + + return 0; + +error_cas_ctx_init: + cas_cleanup_context(); + + return result; +} + +module_init(cas_init_module); + +static void __exit cas_exit_module(void) +{ + cas_ctrl_device_deinit(); + cas_cleanup_context(); +} + +module_exit(cas_exit_module); diff --git a/modules/cas_cache/ocf_env.c b/modules/cas_cache/ocf_env.c new file mode 100644 index 000000000..cc2dd7bb9 --- /dev/null +++ b/modules/cas_cache/ocf_env.c @@ -0,0 +1,284 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include "cas_cache.h" +#include "utils/utils_rpool.h" + +/* *** ALLOCATOR *** */ + +#define CAS_ALLOC_ALLOCATOR_LIMIT 256 + +struct _env_allocator { + /*!< Memory pool ID unique name */ + char *name; + + /*!< Size of specific item of memory pool */ + uint32_t item_size; + + /*!< OS handle to memory pool */ + struct kmem_cache *kmem_cache; + + /*!< Number of currently allocated items in pool */ + atomic_t count; + + struct cas_reserve_pool *rpool; +}; + +static inline size_t env_allocator_align(size_t size) +{ + if (size <= 2) + return size; + return (1ULL << 32) >> __builtin_clz(size - 1); +} + +struct _env_allocator_item { + uint32_t flags; + uint32_t cpu; + char data[]; +}; + +void *env_allocator_new(env_allocator *allocator) +{ + struct 
_env_allocator_item *item = NULL; + int cpu; + + item = cas_rpool_try_get(allocator->rpool, &cpu); + if (item) { + memset(item->data, 0, allocator->item_size - + sizeof(struct _env_allocator_item)); + } else { + item = kmem_cache_zalloc(allocator->kmem_cache, GFP_ATOMIC); + } + + if (item) { + item->cpu = cpu; + atomic_inc(&allocator->count); + return &item->data; + } else { + return NULL; + } +} + +void *env_allocator_new_rpool(void *allocator_ctx, int cpu) +{ + env_allocator *allocator = (env_allocator*) allocator_ctx; + struct _env_allocator_item *item; + + item = kmem_cache_zalloc(allocator->kmem_cache, GFP_NOIO | + __GFP_NORETRY); + + if (item) { + item->flags = (GFP_NOIO | __GFP_NORETRY); + item->cpu = cpu; + } + + return item; +} + +void env_allocator_del_rpool(void *allocator_ctx, void *item) +{ + env_allocator *allocator = (env_allocator* ) allocator_ctx; + + kmem_cache_free(allocator->kmem_cache, item); +} + +#define ENV_ALLOCATOR_NAME_MAX 128 + +env_allocator *env_allocator_create(uint32_t size, const char *name) +{ + int error = -1; + bool retry = true; + + env_allocator *allocator = kzalloc(sizeof(*allocator), GFP_KERNEL); + if (!allocator) { + error = __LINE__; + goto err; + } + + if (size < CAS_RPOOL_MIN_SIZE_ITEM) { + printk(KERN_ERR "Can not create allocator." + " Item size is too small."); + ENV_WARN(true, OCF_PREFIX_SHORT" Can not create allocator." 
+ " Item size is too small.\n"); + error = __LINE__; + goto err; + } + + allocator->item_size = size + sizeof(struct _env_allocator_item); + if (allocator->item_size > PAGE_SIZE) { + printk(KERN_WARNING "Creating allocator with item size" + " greater than 4096B"); + ENV_WARN(true, OCF_PREFIX_SHORT" Creating allocator" + " with item size greater than 4096B\n"); + } + + allocator->name = kstrdup(name, ENV_MEM_NORMAL); + + if (!allocator->name) { + error = __LINE__; + goto err; + } + + /* Initialize kernel memory cache */ +#ifdef CONFIG_SLAB +RETRY: +#else + (void)retry; +#endif + + allocator->kmem_cache = kmem_cache_create(allocator->name, + allocator->item_size, 0, 0, NULL); + if (!allocator->kmem_cache) { + /* Can not setup kernel memory cache */ + error = __LINE__; + goto err; + } + +#ifdef CONFIG_SLAB + if ((allocator->item_size < PAGE_SIZE) + && allocator->kmem_cache->gfporder) { + /* Goal is to have one page allocation */ + if (retry) { + retry = false; + kmem_cache_destroy(allocator->kmem_cache); + allocator->kmem_cache = NULL; + allocator->item_size = env_allocator_align(allocator->item_size); + goto RETRY; + } + } +#endif + + /* Initialize reserve pool handler per cpu */ + + allocator->rpool = cas_rpool_create(CAS_ALLOC_ALLOCATOR_LIMIT, + allocator->name, allocator->item_size, env_allocator_new_rpool, + env_allocator_del_rpool, allocator); + if (!allocator->rpool) { + error = __LINE__; + goto err; + } + + return allocator; + +err: + printk(KERN_ERR "Cannot create memory allocator, ERROR %d", error); + env_allocator_destroy(allocator); + + return NULL; +} + +void env_allocator_del(env_allocator *allocator, void *obj) +{ + struct _env_allocator_item *item = + container_of(obj, struct _env_allocator_item, data); + + atomic_dec(&allocator->count); + + if (item->flags == (GFP_NOIO | __GFP_NORETRY) && + !cas_rpool_try_put(allocator->rpool, item, item->cpu)) + return; + + kmem_cache_free(allocator->kmem_cache, item); +} + +void env_allocator_destroy(env_allocator 
/*
 * Returns nonzero if @base may be accessed with @align-sized loads:
 * either the architecture tolerates unaligned access, or the pointer is
 * naturally aligned. @align must be a power of two.
 */
static int env_sort_is_aligned(const void *base, int align)
{
	return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
		((unsigned long)base & (align - 1)) == 0;
}

/* Swap two 4-byte elements in place (@size is ignored — always 4). */
static void env_sort_u32_swap(void *a, void *b, int size)
{
	u32 t = *(u32 *)a;
	*(u32 *)a = *(u32 *)b;
	*(u32 *)b = t;
}

/* Swap two 8-byte elements in place (@size is ignored — always 8). */
static void env_sort_u64_swap(void *a, void *b, int size)
{
	u64 t = *(u64 *)a;
	*(u64 *)a = *(u64 *)b;
	*(u64 *)b = t;
}

/* Byte-by-byte fallback swap for arbitrary element sizes (size >= 1). */
static void env_sort_generic_swap(void *a, void *b, int size)
{
	char t;

	do {
		/* `*(char *)a++` parses as *((char *)(a++)): void-pointer
		 * arithmetic (GNU extension) advances one byte per pass. */
		t = *(char *)a;
		*(char *)a++ = *(char *)b;
		*(char *)b++ = t;
	} while (--size > 0);
}

/*
 * In-place heapsort of @num elements of @size bytes each.
 *
 * @cmp_fn: qsort-style comparator (negative / zero / positive).
 * @swap_fn: optional element swapper; when NULL, a word-sized swap is
 * chosen automatically for 4/8-byte aligned elements, otherwise the
 * generic byte swap is used.
 *
 * Not a stable sort. O(n log n) worst case, no extra allocation.
 */
void env_sort(void *base, size_t num, size_t size,
	int (*cmp_fn)(const void *, const void *),
	void (*swap_fn)(void *, void *, int size))
{
	/* pre-scale counters for performance: i/n/c/r are byte offsets,
	 * not element indices, so the loops never multiply by size. */
	int64_t i = (num/2 - 1) * size, n = num * size, c, r;

	if (!swap_fn) {
		if (size == 4 && env_sort_is_aligned(base, 4))
			swap_fn = env_sort_u32_swap;
		else if (size == 8 && env_sort_is_aligned(base, 8))
			swap_fn = env_sort_u64_swap;
		else
			swap_fn = env_sort_generic_swap;
	}

	/* heapify: sift down every interior node, last parent first. */
	for ( ; i >= 0; i -= size) {
		for (r = i; r * 2 + size < n; r = c) {
			c = r * 2 + size;
			/* pick the larger of the two children */
			if (c < n - size &&
					cmp_fn(base + c, base + c + size) < 0)
				c += size;
			if (cmp_fn(base + r, base + c) >= 0)
				break;
			swap_fn(base + r, base + c, size);
		}
	}

	/* sort: repeatedly move the heap maximum to the tail, then
	 * restore the heap property over the shrunken prefix. */
	for (i = n - size; i > 0; i -= size) {
		swap_fn(base, base + i, size);
		for (r = 0; r * 2 + size < i; r = c) {
			c = r * 2 + size;
			if (c < i - size &&
					cmp_fn(base + c, base + c + size) < 0)
				c += size;
			if (cmp_fn(base + r, base + c) >= 0)
				break;
			swap_fn(base + r, base + c, size);
		}
	}
}
0; +} + +static inline void env_mutex_lock(env_mutex *mutex) +{ + mutex_lock(mutex); +} + +static inline int env_mutex_lock_interruptible(env_mutex *mutex) +{ + return mutex_lock_interruptible(mutex) ? -OCF_ERR_INTR : 0; +} + +static inline int env_mutex_trylock(env_mutex *mutex) +{ + return mutex_trylock(mutex) ? 0 : -OCF_ERR_NO_LOCK; +} + +static inline void env_mutex_unlock(env_mutex *mutex) +{ + mutex_unlock(mutex); +} + +static inline int env_mutex_is_locked(env_mutex *mutex) +{ + return mutex_is_locked(mutex); +} + +/* *** RECURSIVE MUTEX *** */ + +typedef struct { + struct mutex mutex; + atomic_t count; + struct task_struct *holder; +} env_rmutex; + +static inline int env_rmutex_init(env_rmutex *rmutex) +{ + mutex_init(&rmutex->mutex); + atomic_set(&rmutex->count, 0); + rmutex->holder = NULL; + return 0; +} + +static inline void env_rmutex_lock(env_rmutex *rmutex) +{ + if (current == rmutex->holder) { + atomic_inc(&rmutex->count); + return; + } + + mutex_lock(&rmutex->mutex); + rmutex->holder = current; + atomic_inc(&rmutex->count); +} + +static inline int env_rmutex_lock_interruptible(env_rmutex *rmutex) +{ + int result = 0; + if (current == rmutex->holder) { + atomic_inc(&rmutex->count); + return 0; + } + + result = mutex_lock_interruptible(&rmutex->mutex); + if (result) { + /* No lock */ + return -OCF_ERR_INTR; + } + + rmutex->holder = current; + atomic_inc(&rmutex->count); + + return 0; +} + +static inline int env_rmutex_trylock(env_rmutex *rmutex) +{ + if (current == rmutex->holder) { + atomic_inc(&rmutex->count); + return 0; + } + + if (mutex_trylock(&rmutex->mutex)) { + /* No lock */ + return -OCF_ERR_NO_LOCK; + } + + rmutex->holder = current; + atomic_inc(&rmutex->count); + + return 0; +} + +static inline void env_rmutex_unlock(env_rmutex *rmutex) +{ + BUG_ON(current != rmutex->holder); + + if (atomic_dec_return(&rmutex->count)) { + return; + } + + rmutex->holder = NULL; + mutex_unlock(&rmutex->mutex); +} + +static inline int 
static inline int env_rwsem_down_read_interruptible(env_rwsem *s)
{
	/* Poll the semaphore with down_read_trylock() each time the wait
	 * queue is woken; env_rwsem_up_read()/up_write() wake s->wq on
	 * release. wait_event_interruptible() returns nonzero only when a
	 * signal interrupts the wait, mapped here to -OCF_ERR_INTR; on
	 * return 0 the read lock has been acquired. */
	return wait_event_interruptible(s->wq,
			down_read_trylock(&s->sem)) ? -OCF_ERR_INTR : 0;
}
/* *** COMPLETION *** */

/* Thin aliases over the kernel completion primitive; one-shot
 * synchronization between a waiter and a signaller. */
typedef struct completion env_completion;

/* Initialize (or re-arm) the completion to the not-done state. */
static inline void env_completion_init(env_completion *completion)
{
	init_completion(completion);
}

/* Block uninterruptibly until the completion is signalled. */
static inline void env_completion_wait(env_completion *completion)
{
	wait_for_completion(completion);
}

/* Signal the completion, releasing one waiter. */
static inline void env_completion_complete(env_completion *completion)
{
	complete(completion);
}
/* *** SPIN LOCKS *** */

/* Direct aliases over kernel spinlocks; callers pick the variant
 * matching their context (plain / irq-disabling / irq-saving). */
typedef spinlock_t env_spinlock;

static inline void env_spinlock_init(env_spinlock *l)
{
	spin_lock_init(l);
}

static inline void env_spinlock_lock(env_spinlock *l)
{
	spin_lock(l);
}

static inline void env_spinlock_unlock(env_spinlock *l)
{
	spin_unlock(l);
}

/* Lock with local interrupts unconditionally disabled/re-enabled. */
static inline void env_spinlock_lock_irq(env_spinlock *l)
{
	spin_lock_irq(l);
}

static inline void env_spinlock_unlock_irq(env_spinlock *l)
{
	spin_unlock_irq(l);
}

/* Macros (not inlines) because spin_lock_irqsave() writes @flags
 * by name, not through a pointer. */
#define env_spinlock_lock_irqsave(l, flags) \
	spin_lock_irqsave((l), (flags))

#define env_spinlock_unlock_irqrestore(l, flags) \
	spin_unlock_irqrestore((l), (flags))
/*
 * Returns nonzero when called from interrupt (hard or soft IRQ) context,
 * per the kernel's in_interrupt().
 *
 * FIX: dropped a stray second semicolon after the return statement
 * (a dead empty statement; no behavior change).
 */
static inline int env_in_interrupt(void)
{
	return in_interrupt();
}
/* *** CRC *** */

/* Fold @len bytes of @data into the running CRC-32 @crc using the
 * kernel's crc32() routine; returns the updated checksum. */
static inline uint32_t env_crc32(uint32_t crc, uint8_t const *data, size_t len)
{
	return crc32(crc, data, len);
}
EFAULT }, + { OCF_ERR_CACHE_IN_USE, EBUSY }, + { OCF_ERR_CACHE_NOT_EXIST, ENODEV }, + { OCF_ERR_CACHE_EXIST, EEXIST }, + { OCF_ERR_TOO_MANY_CORES, ENOSPC }, + { OCF_ERR_CORE_NOT_AVAIL, ENAVAIL }, + { OCF_ERR_NOT_OPEN_EXC, EBUSY }, + { OCF_ERR_CACHE_NOT_AVAIL, ENAVAIL }, + { OCF_ERR_IO_CLASS_NOT_EXIST, ENODEV }, + { OCF_ERR_WRITE_CACHE, EIO }, + { OCF_ERR_WRITE_CORE, EIO }, + { OCF_ERR_DIRTY_SHUTDOWN, EFAULT }, + { OCF_ERR_DIRTY_EXISTS, EFAULT }, + { OCF_ERR_FLUSHING_INTERRUPTED, EINTR }, + + /* CAS kernel error mappings*/ + { KCAS_ERR_ROOT, EPERM }, + { KCAS_ERR_SYSTEM, EINVAL }, + { KCAS_ERR_BAD_RANGE, ERANGE }, + { KCAS_ERR_DEV_SPACE, ENOSPC }, + { KCAS_ERR_INV_IOCTL, EINVAL }, + { KCAS_ERR_DEV_PENDING, EBUSY }, + { KCAS_ERR_DIRTY_EXISTS_NVME, EFAULT }, + { KCAS_ERR_FILE_EXISTS, EEXIST }, + { KCAS_ERR_IN_UPGRADE, EFAULT }, + { KCAS_ERR_UNALIGNED, EINVAL }, + { KCAS_ERR_NO_STORED_CONF, EINTR }, + { KCAS_ERR_ROLLBACK, EFAULT }, + { KCAS_ERR_NOT_NVME, ENODEV }, + { KCAS_ERR_FORMAT_FAILED, EFAULT }, + { KCAS_ERR_NVME_BAD_FORMAT, EINVAL }, + { KCAS_ERR_CONTAINS_PART, EINVAL }, + { KCAS_ERR_A_PART, EINVAL }, + { KCAS_ERR_REMOVED_DIRTY, EIO }, + { KCAS_ERR_STOPPED_DIRTY, EIO }, +}; + +/*******************************************/ +/* Helper which change cas-specific error */ +/* codes to kernel generic error codes */ +/*******************************************/ + +int map_cas_err_to_generic_code(int cas_error_code) +{ + int i; + + if (cas_error_code == 0) + return 0; /* No Error */ + + cas_error_code = abs(cas_error_code); + + for (i = 0; i < ARRAY_SIZE(cas_error_code_map); i++) { + if (cas_error_code_map[i].cas_error == cas_error_code) + return -cas_error_code_map[i].std_error; + } + + return -cas_error_code; +} + +#define _GET_CMD_INFO(cmd_info, arg, size) ({ \ + cmd_info = vmalloc(size); \ + if (!cmd_info) \ + return -ENOMEM; \ + if (copy_from_user(cmd_info, (void __user *)arg, size)) { \ + printk(KERN_ALERT "Cannot copy cmd info from user space\n"); \ + 
/*
 * Finish an ioctl case: record @result in the command's ext_err_code,
 * copy the (possibly updated) @cmd_info back to userspace, free the
 * kernel copy, and `return` the errno-mapped result from the *calling*
 * function. A failed copy_to_user overrides the result with -EFAULT.
 *
 * FIX: @result was expanded twice (once into `ret`, once inside abs());
 * callers pass expressions like `retval > 0 ? 0 : retval`, so evaluate
 * the argument exactly once and reuse `ret` for the abs() as well.
 */
#define RETURN_CMD_RESULT(cmd_info, arg, result) ({ \
	int ret = result; \
	cmd_info->ext_err_code = abs(ret); \
	if (copy_to_user((void __user *)arg, cmd_info, sizeof(*cmd_info))) { \
		printk(KERN_ALERT "Unable to copy response to user\n"); \
		ret = -EFAULT; \
	} \
	vfree(cmd_info); \
	return map_cas_err_to_generic_code(ret); \
})
cmd_info->caching_mode, cmd_info->flush_data); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + + case KCAS_IOCTL_INSERT_CORE: { + struct kcas_insert_core *cmd_info; + struct ocf_mngt_core_config cfg; + + GET_CMD_INFO(cmd_info, arg); + + retval = cache_mng_prepare_core_cfg(&cfg, cmd_info); + if (retval) + RETURN_CMD_RESULT(cmd_info, arg, retval); + + retval = cache_mng_add_core_to_cache(&cfg, cmd_info); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + + case KCAS_IOCTL_REMOVE_CORE: { + struct kcas_remove_core *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + retval = cache_mng_remove_core_from_cache(cmd_info); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + + case KCAS_IOCTL_RESET_STATS: { + struct kcas_reset_stats *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + retval = cache_mng_reset_core_stats(cmd_info->cache_id, + cmd_info->core_id); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + + case KCAS_IOCTL_FLUSH_CACHE: { + struct kcas_flush_cache *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + retval = cache_mng_flush_device(cmd_info->cache_id); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + + case KCAS_IOCTL_INTERRUPT_FLUSHING: { + struct kcas_interrupt_flushing *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + retval = cache_mng_interrupt_flushing(cmd_info->cache_id); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + + case KCAS_IOCTL_FLUSH_CORE: { + struct kcas_flush_core *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + retval = cache_mng_flush_object(cmd_info->cache_id, + cmd_info->core_id); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + + case KCAS_IOCTL_CACHE_INFO: { + struct kcas_cache_info *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + retval = cache_mng_get_info(cmd_info); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + + case KCAS_IOCTL_CORE_INFO: { + struct kcas_core_info *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + retval = cache_mng_get_core_info(cmd_info); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + + case 
KCAS_IOCTL_PARTITION_STATS: { + struct kcas_io_class *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + retval = cache_mng_get_io_class_info(cmd_info); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + + } + + case KCAS_IOCTL_PARTITION_SET: { + struct kcas_io_classes *cmd_info; + + /* copy entire memory from user, including array of + * ocf_io_class_info structs past the end of kcas_io_classes */ + _GET_CMD_INFO(cmd_info, arg, KCAS_IO_CLASSES_SIZE); + + retval = cache_mng_set_partitions(cmd_info); + + /* return just sizeof(struct kcas_io_classes) bytes of data */ + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + + case KCAS_IOCTL_GET_CACHE_COUNT: { + struct kcas_cache_count *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + cmd_info->cache_count = ocf_mngt_cache_get_count(cas_ctx); + + RETURN_CMD_RESULT(cmd_info, arg, 0); + } + + case KCAS_IOCTL_LIST_CACHE: { + struct kcas_cache_list *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + retval = cache_mng_list_caches(cmd_info); + + RETURN_CMD_RESULT(cmd_info, arg, retval > 0 ? 
0 : retval); + } + + case KCAS_IOCTL_GET_CAPABILITIES: { + struct kcas_capabilites *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + memset(cmd_info, 0, sizeof(*cmd_info)); +#ifdef CAS_NVME_FULL + cmd_info->nvme_format = 1; +#endif + RETURN_CMD_RESULT(cmd_info, arg, 0); + } + + case KCAS_IOCTL_UPGRADE: { + struct kcas_upgrade *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + retval = cas_upgrade(); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + +#if defined(CAS_NVME_FULL) + case KCAS_IOCTL_NVME_FORMAT: { + struct kcas_nvme_format *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + retval = cas_nvme_format_optimal( + cmd_info->device_path_name, + cmd_info->metadata_mode, + cmd_info->force); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } +#endif + + case KCAS_IOCTL_GET_CORE_POOL_COUNT: { + struct kcas_core_pool_count *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + cmd_info->core_pool_count = + ocf_mngt_core_pool_get_count(cas_ctx); + + RETURN_CMD_RESULT(cmd_info, arg, 0); + } + + case KCAS_IOCTL_GET_CORE_POOL_PATHS: { + struct kcas_core_pool_path *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + retval = cache_mng_core_pool_get_paths(cmd_info); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + case KCAS_IOCTL_CORE_POOL_REMOVE: { + struct kcas_core_pool_remove *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + retval = cache_mng_core_pool_remove(cmd_info); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + case KCAS_IOCTL_CACHE_CHECK_DEVICE: { + struct kcas_cache_check_device *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + retval = cache_mng_cache_check_device(cmd_info); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + case KCAS_IOCTL_SET_CORE_PARAM: { + struct kcas_set_core_param *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + retval = cache_mng_set_core_params(cmd_info); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + case KCAS_IOCTL_GET_CORE_PARAM: { + struct kcas_get_core_param *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + retval = 
cache_mng_get_core_params(cmd_info); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + case KCAS_IOCTL_SET_CACHE_PARAM: { + struct kcas_set_cache_param *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + retval = cache_mng_set_cache_params(cmd_info); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + case KCAS_IOCTL_GET_CACHE_PARAM: { + struct kcas_get_cache_param *cmd_info; + + GET_CMD_INFO(cmd_info, arg); + + retval = cache_mng_get_cache_params(cmd_info); + + RETURN_CMD_RESULT(cmd_info, arg, retval); + } + + default: + return -EINVAL; + } +} diff --git a/modules/cas_cache/service_ui_ioctl.h b/modules/cas_cache/service_ui_ioctl.h new file mode 100644 index 000000000..8c9cfc5ec --- /dev/null +++ b/modules/cas_cache/service_ui_ioctl.h @@ -0,0 +1,15 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __SERVICE_UI_IOCTL_H__ + +#define __SERVICE_UI_IOCTL_H__ + +struct casdsk_disk; + +long cas_service_ioctl_ctrl(struct file *filp, unsigned int cmd, + unsigned long arg); + +#endif diff --git a/modules/cas_cache/threads.c b/modules/cas_cache/threads.c new file mode 100644 index 000000000..83af4c2b0 --- /dev/null +++ b/modules/cas_cache/threads.c @@ -0,0 +1,281 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include "threads.h" +#include "cas_cache.h" + +#define MAX_THREAD_NAME_SIZE 16 + +struct cas_thread_info { + atomic_t stop; + struct completion compl; + struct completion sync_compl; + void *sync_data; + wait_queue_head_t wq; + atomic_t kicked; + struct task_struct *thread; + char name[MAX_THREAD_NAME_SIZE]; + bool running; +}; + +static int _cas_io_queue_thread(void *data) +{ + ocf_queue_t q = data; + struct cas_thread_info *info; + + BUG_ON(!q); + + /* complete the creation of the thread */ + info = ocf_queue_get_priv(q); + BUG_ON(!info); + + DAEMONIZE(info->thread->comm); + + complete(&info->compl); + + /* Continue working until signaled to exit. 
 */
	do {
		/* Wait until there are completed read misses from the HDDs,
		 * or a stop.
		 */
		wait_event_interruptible(info->wq, ocf_queue_pending_io(q) ||
				atomic_read(&info->stop));

		ocf_queue_run(q);

	/* Keep draining after a stop request until the queue is empty. */
	} while (!atomic_read(&info->stop) || ocf_queue_pending_io(q));

	WARN(ocf_queue_pending_io(q), "Still pending IO requests\n");

	/* If we get here, then thread was signalled to terminate.
	 * So, let's complete and exit.
	 */
	complete_and_exit(&info->compl, 0);

	return 0;
}

/* Cleaner completion callback: hand the next-run interval (ms) back to
 * _cas_cleaner_thread and wake it via sync_compl. */
static void _cas_cleaner_complete(ocf_cleaner_t c, uint32_t interval)
{
	struct cas_thread_info *info = ocf_cleaner_get_priv(c);
	uint32_t *ms = info->sync_data;

	*ms = interval;
	complete(&info->sync_compl);
}

/* Kernel thread body driving the OCF cleaner: run one cleaning pass, wait
 * for its async completion, then sleep for the interval the cleaner
 * requested (or until a stop is signalled). */
static int _cas_cleaner_thread(void *data)
{
	ocf_cleaner_t c = data;
	ocf_cache_t cache = ocf_cleaner_get_cache(c);
	struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
	struct cas_thread_info *info;
	uint32_t ms;

	BUG_ON(!c);

	ENV_BUG_ON(!cache_priv);
	/* complete the creation of the thread */
	info = ocf_cleaner_get_priv(c);
	BUG_ON(!info);

	DAEMONIZE(info->thread->comm);

	complete(&info->compl);

	/* _cas_cleaner_complete() writes the interval into this stack var. */
	info->sync_data = &ms;
	ocf_cleaner_set_cmpl(c, _cas_cleaner_complete);

	do {
		init_completion(&info->sync_compl);
		/* NOTE(review): smp_processor_id() here runs without
		 * preemption disabled — assumes any io_queue is acceptable;
		 * confirm against queue setup. */
		ocf_cleaner_run(c, cache_priv->io_queues[smp_processor_id()]);
		wait_for_completion(&info->sync_compl);
	/* Timeout of 0 means the sleep elapsed without a stop request. */
	} while (0 == wait_event_interruptible_timeout(info->wq,
			atomic_read(&info->stop), msecs_to_jiffies(ms)));

	complete_and_exit(&info->compl, 0);

	return 0;
}

/* Kernel thread body for the OCF metadata updater: run pending updates,
 * otherwise sleep until kicked or stopped. */
static int _cas_metadata_updater_thread(void *data)
{
	ocf_metadata_updater_t mu = data;
	struct cas_thread_info *info;

	BUG_ON(!mu);

	/* complete the creation of the thread */
	info = ocf_metadata_updater_get_priv(mu);
	BUG_ON(!info);

	DAEMONIZE(info->thread->comm);

	complete(&info->compl);

	do {
		if (atomic_read(&info->stop))
			break;

		/* Clear the kick before running so a kick arriving during
		 * the run is not lost. */
		atomic_set(&info->kicked, 0);
		if (ocf_metadata_updater_run(mu))
			continue;

		wait_event_interruptible(info->wq, atomic_read(&info->stop) ||
				atomic_read(&info->kicked));
	} while (true);

	complete_and_exit(&info->compl, 0);

	return 0;
}

/* Allocate a cas_thread_info, create (but do not start) the kthread with a
 * printf-formatted name, and optionally bind it to @cpu.
 * Returns 0 and stores the info via @pinfo, or a negative errno. */
static int _cas_create_thread(struct cas_thread_info **pinfo,
		int (*threadfn)(void *), void *priv, int cpu,
		const char *fmt, ...)
{
	struct cas_thread_info *info;
	struct task_struct *thread;
	va_list args;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	atomic_set(&info->stop, 0);
	init_completion(&info->compl);
	init_completion(&info->sync_compl);
	init_waitqueue_head(&info->wq);

	va_start(args, fmt);
	vsnprintf(info->name, sizeof(info->name), fmt, args);
	va_end(args);

	thread = kthread_create(threadfn, priv, "%s", info->name);
	if (IS_ERR(thread)) {
		kfree(info);
		/* Propagate error code as PTR_ERR */
		return PTR_ERR(thread);
	}
	info->thread = thread;

	/* Affinitize thread to core */
	if (cpu != CAS_CPUS_ALL)
		kthread_bind(thread, cpu);

	if (pinfo)
		*pinfo = info;

	return 0;

}

/* Wake the created kthread and wait until it signals it is running. */
static void _cas_start_thread(struct cas_thread_info *info)
{
	wake_up_process(info->thread);
	wait_for_completion(&info->compl);

	info->running = true;

	printk(KERN_DEBUG "Thread %s started\n", info->name);
}

/* Signal the thread to stop, wait for it to exit, then free its info.
 * Safe to call for a thread that was created but never started. */
static void _cas_stop_thread(struct cas_thread_info *info)
{
	if (info->running && info->thread) {
		/* Reuse compl: the thread completes it on exit. */
		init_completion(&info->compl);
		atomic_set(&info->stop, 1);
		wake_up(&info->wq);
		wait_for_completion(&info->compl);
		printk(KERN_DEBUG "Thread %s stopped\n", info->name);
	}
	kfree(info);
}

/* Create and start the per-queue IO thread, bound to @cpu, and attach the
 * thread info as the queue's private data. */
int cas_create_queue_thread(ocf_queue_t q, int cpu)
{
	struct cas_thread_info *info;
	ocf_cache_t cache = ocf_queue_get_cache(q);
	int result;

	result = _cas_create_thread(&info, _cas_io_queue_thread, q, cpu,
			"cas_io_%s_%d", ocf_cache_get_name(cache), cpu);
	if (!result) {
		ocf_queue_set_priv(q, info);
		_cas_start_thread(info);
	}

	return result;
}

/* Wake the queue thread to process newly pending IO. */
void cas_kick_queue_thread(ocf_queue_t q)
{
	struct
cas_thread_info *info = ocf_queue_get_priv(q);
	wake_up(&info->wq);
}


/* Detach and stop the queue's IO thread; frees its info. */
void cas_stop_queue_thread(ocf_queue_t q)
{
	struct cas_thread_info *info = ocf_queue_get_priv(q);
	ocf_queue_set_priv(q, NULL);
	_cas_stop_thread(info);
}

/* Create and start the cleaner thread (not CPU-bound) and attach it as the
 * cleaner's private data. Returns 0 or a negative errno. */
int cas_create_cleaner_thread(ocf_cleaner_t c)
{
	struct cas_thread_info *info;
	ocf_cache_t cache = ocf_cleaner_get_cache(c);
	int result;

	result = _cas_create_thread(&info, _cas_cleaner_thread, c,
			CAS_CPUS_ALL, "cas_clean_%d",
			ocf_cache_get_id(cache));
	if (!result) {
		ocf_cleaner_set_priv(c, info);
		_cas_start_thread(info);
	}

	return result;
}

/* Detach and stop the cleaner thread; frees its info. */
void cas_stop_cleaner_thread(ocf_cleaner_t c)
{
	struct cas_thread_info *info = ocf_cleaner_get_priv(c);
	ocf_cleaner_set_priv(c, NULL);
	_cas_stop_thread(info);
}

/* Create and start the metadata updater thread (not CPU-bound) and attach
 * it as the updater's private data. Returns 0 or a negative errno. */
int cas_create_metadata_updater_thread(ocf_metadata_updater_t mu)
{
	struct cas_thread_info *info;
	int result;

	result = _cas_create_thread(&info, _cas_metadata_updater_thread,
			mu, CAS_CPUS_ALL, "ocf_metadata_updater_%d",
			ocf_cache_get_id(ocf_metadata_updater_get_cache(mu)));
	if (!result) {
		ocf_metadata_updater_set_priv(mu, info);
		_cas_start_thread(info);
	}

	return result;
}

/* Mark the updater as kicked and wake its thread to run pending updates. */
void cas_kick_metadata_updater_thread(ocf_metadata_updater_t mu)
{
	struct cas_thread_info *info = ocf_metadata_updater_get_priv(mu);
	atomic_set(&info->kicked, 1);
	wake_up(&info->wq);
}


/* Detach and stop the metadata updater thread; frees its info. */
void cas_stop_metadata_updater_thread(ocf_metadata_updater_t mu)
{
	struct cas_thread_info *info = ocf_metadata_updater_get_priv(mu);
	ocf_metadata_updater_set_priv(mu, NULL);
	_cas_stop_thread(info);
}

diff --git a/modules/cas_cache/threads.h b/modules/cas_cache/threads.h new file mode 100644 index 000000000..fc8bde8d3 --- /dev/null +++ b/modules/cas_cache/threads.h @@ -0,0 +1,26 @@
/*
* Copyright(c) 2012-2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/


#ifndef __THREADS_H__
#define __THREADS_H__

#include "ocf/ocf.h"
#include "linux_kernel_version.h"

/* Sentinel CPU id: do not bind the thread to any particular core. */
#define CAS_CPUS_ALL -1

int cas_create_queue_thread(ocf_queue_t q, int cpu);
void cas_kick_queue_thread(ocf_queue_t q);
void cas_stop_queue_thread(ocf_queue_t q);

int cas_create_cleaner_thread(ocf_cleaner_t c);
void cas_stop_cleaner_thread(ocf_cleaner_t c);

int cas_create_metadata_updater_thread(ocf_metadata_updater_t mu);
void cas_kick_metadata_updater_thread(ocf_metadata_updater_t mu);
void cas_stop_metadata_updater_thread(ocf_metadata_updater_t mu);

#endif /* __THREADS_H__ */
diff --git a/modules/cas_cache/utils/cas_cache_utils.h b/modules/cas_cache/utils/cas_cache_utils.h new file mode 100644 index 000000000..578a4d83f --- /dev/null +++ b/modules/cas_cache/utils/cas_cache_utils.h @@ -0,0 +1,13 @@
/*
* Copyright(c) 2012-2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/


#ifndef __CAS_UTILS_H__
#define __CAS_UTILS_H__

#include "utils_nvme.h"
#include "utils_properties.h"

#endif /* __CAS_UTILS_H__ */
diff --git a/modules/cas_cache/utils/utils_blk.c b/modules/cas_cache/utils/utils_blk.c new file mode 100644 index 000000000..1aaa573b0 --- /dev/null +++ b/modules/cas_cache/utils/utils_blk.c @@ -0,0 +1,22 @@
/*
* Copyright(c) 2012-2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/

#include "utils_blk.h"

/* Count populated partition-table slots of @bdev's disk under RCU. */
int cas_blk_get_part_count(struct block_device *bdev)
{
	struct disk_part_tbl *ptbl;
	int i, count = 0;

	rcu_read_lock();
	ptbl = rcu_dereference(bdev->bd_disk->part_tbl);
	for (i = 0; i < ptbl->len; ++i) {
		if (rcu_access_pointer(ptbl->part[i]))
			count++;
	}
	rcu_read_unlock();

	return count;
}
diff --git a/modules/cas_cache/utils/utils_blk.h b/modules/cas_cache/utils/utils_blk.h new file mode 100644 index 000000000..1eeeae060 --- /dev/null +++ b/modules/cas_cache/utils/utils_blk.h @@ -0,0 +1,14 @@
/*
* Copyright(c) 2012-2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/

#ifndef UTILS_BLK_H_
#define UTILS_BLK_H_

/* NOTE(review): include targets (<...>) were lost in transcription of this
 * patch — restore the original headers. */
#include
#include
+int cas_blk_get_part_count(struct block_device *bdev); + +#endif /* UTILS_BLK_H_ */ diff --git a/modules/cas_cache/utils/utils_data.c b/modules/cas_cache/utils/utils_data.c new file mode 100644 index 000000000..d0992ffa8 --- /dev/null +++ b/modules/cas_cache/utils/utils_data.c @@ -0,0 +1,130 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include "cas_cache.h" + +/** + * This function locates index of IO vec from given vecs array where byte at + * offset is located. When found it returns its index and byte offset within + * this vec. + * @param vecs IO vector array to be searched + * @param vec_num number of items in IO vector array + * @param offset byte offset to be found + * @param offset_in_vec byte offset within found IO vec + * @return vec index if it lies within specified buffer, otherwise -1 + */ +static int get_starting_vec(struct bio_vec *vecs, uint64_t vecs_num, + uint64_t offset, uint64_t *offset_in_vec) +{ + int i; + + for (i = 0; i < vecs_num; i++) { + if (vecs[i].bv_len > offset) { + if (offset_in_vec != NULL) + *offset_in_vec = offset; + return i; + } + offset -= vecs[i].bv_len; + } + + return -1; +} + +uint64_t cas_data_cpy(struct bio_vec *dst, uint64_t dst_num, + struct bio_vec *src, uint64_t src_num, + uint64_t to, uint64_t from, uint64_t bytes) +{ + uint64_t i, j, dst_len, src_len, to_copy; + uint64_t dst_off, src_off; + uint64_t written = 0; + int ret; + void *dst_p, *src_p; + struct bio_vec *curr_dst, *curr_src; + + /* Locate vec idx and offset in dst vec array */ + ret = get_starting_vec(dst, dst_num, to, &to); + if (ret < 0) { + CAS_PRINT_RL(KERN_INFO "llu dst buffer too small " + "to_offset=%llu bytes=%llu", to, bytes); + return 0; + } + j = ret; + + /* Locate vec idx and offset in src vec array */ + ret = get_starting_vec(src, src_num, from, &from); + if (ret < 0) { + CAS_PRINT_RL(KERN_INFO "llu src buffer too small " + "from_offset=%llu bytes=%llu", from, bytes); + return 0; + } + 
i = ret; + + curr_dst = &dst[j]; + curr_src = &src[i]; + + dst_off = curr_dst->bv_offset + to; + dst_len = curr_dst->bv_len - to; + + src_off = curr_src->bv_offset + from; + src_len = curr_src->bv_len - from; + + while (written < bytes) { + dst_p = page_address(curr_dst->bv_page) + dst_off; + src_p = page_address(curr_src->bv_page) + src_off; + + to_copy = src_len > dst_len ? dst_len : src_len; + + /* Prevent from copying too much*/ + if ((written + to_copy) > bytes) + to_copy = bytes - written; + + memcpy(dst_p, src_p, to_copy); + written += to_copy; + + if (written == bytes) + break; + + /* Setup new len and offset. */ + dst_off += to_copy; + dst_len -= to_copy; + + src_off += to_copy; + src_len -= to_copy; + + /* Go to next src buffer */ + if (src_len == 0) { + i++; + + /* Setup new len and offset. */ + if (i < src_num) { + curr_src = &src[i]; + src_off = curr_src->bv_offset; + src_len = curr_src->bv_len; + } else { + break; + } + } + + /* Go to next dst buffer */ + if (dst_len == 0) { + j++; + + if (j < dst_num) { + curr_dst = &dst[j]; + dst_off = curr_dst->bv_offset; + dst_len = curr_dst->bv_len; + } else { + break; + } + } + } + + if (written != bytes) { + CAS_PRINT_RL(KERN_INFO "Written bytes not equal requested bytes " + "(written=%llu; requested=%llu)", written, bytes); + } + + return written; +} diff --git a/modules/cas_cache/utils/utils_data.h b/modules/cas_cache/utils/utils_data.h new file mode 100644 index 000000000..71b31b946 --- /dev/null +++ b/modules/cas_cache/utils/utils_data.h @@ -0,0 +1,31 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef UTILS_DATA_H_ +#define UTILS_DATA_H_ + +/** + * @brief Copy data from a data vector to another one + * + * This function copies number of bytes from source IO vector to destination + * IO vector. It starts coping to specified offset in destination IO vector. 
If + * there is not enough space it will return number of bytes that was + * successfully copied. + * + * @param dst destination IO vector + * @param dst_num size of destination IO vector + * @param src source IO vector + * @param src_num size of source IO vector + * @param to dst offset where write to will start + * @param from src offset where write from will start + * @param bytes number of bytes to be copied + * + * @return number of bytes written from src to dst + */ +uint64_t cas_data_cpy(struct bio_vec *dst, uint64_t dst_num, + struct bio_vec *src, uint64_t src_num, + uint64_t to, uint64_t from, uint64_t bytes); + +#endif /* UTILS_DATA_H_ */ diff --git a/modules/cas_cache/utils/utils_gc.c b/modules/cas_cache/utils/utils_gc.c new file mode 100644 index 000000000..d3f720856 --- /dev/null +++ b/modules/cas_cache/utils/utils_gc.c @@ -0,0 +1,78 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include "utils_gc.h" +#include + +#if defined (CAS_GARBAGE_COLLECTOR) +struct cas_vfree_item { + struct llist_head list; + struct work_struct ws; +}; + +static DEFINE_PER_CPU(struct cas_vfree_item, cas_vfree_item); + +static atomic_t freed = ATOMIC_INIT(0); + +static void cas_garbage_collector(struct work_struct *w) +{ + struct cas_vfree_item *item = container_of(w, struct cas_vfree_item, + ws); + struct llist_node *llnode = llist_del_all(&item->list); + + while (llnode) { + void *item = llnode; + + llnode = llnode->next; + atomic_dec(&freed); + vfree(item); + } +} + +void cas_vfree(const void *addr) +{ + struct cas_vfree_item *item = this_cpu_ptr(&cas_vfree_item); + + atomic_inc(&freed); + + if (llist_add((struct llist_node *)addr, &item->list)) + schedule_work(&item->ws); +} + +void cas_garbage_collector_init(void) +{ + int i; + + for_each_possible_cpu(i) { + struct cas_vfree_item *item; + + item = &per_cpu(cas_vfree_item, i); + init_llist_head(&item->list); + INIT_WORK(&item->ws, cas_garbage_collector); + } +} + 
/* Wait for all per-CPU drain work to finish, then warn if any queued
 * allocation was never freed. */
void cas_garbage_collector_deinit(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cas_vfree_item *item;

		item = &per_cpu(cas_vfree_item, i);
		while (work_pending(&item->ws))
			schedule();
	}

	WARN(atomic_read(&freed) != 0,
			OCF_PREFIX_SHORT" Not all memory deallocated\n");
}
#else
/* Without CAS_GARBAGE_COLLECTOR, cas_vfree degenerates to plain vfree. */
void cas_garbage_collector_init(void) {};

void cas_garbage_collector_deinit(void) {};

void cas_vfree(const void *addr) { vfree(addr); };
#endif
diff --git a/modules/cas_cache/utils/utils_gc.h b/modules/cas_cache/utils/utils_gc.h new file mode 100644 index 000000000..3d3af0d9f --- /dev/null +++ b/modules/cas_cache/utils/utils_gc.h @@ -0,0 +1,16 @@
/*
* Copyright(c) 2012-2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/

#ifndef UTILS_GC_H_
#define UTILS_GC_H_


void cas_garbage_collector_init(void);

void cas_garbage_collector_deinit(void);

void cas_vfree(const void *addr);

#endif /* UTILS_GC_H_ */
diff --git a/modules/cas_cache/utils/utils_nvme.c b/modules/cas_cache/utils/utils_nvme.c new file mode 100644 index 000000000..a2dd3d691 --- /dev/null +++ b/modules/cas_cache/utils/utils_nvme.c @@ -0,0 +1,583 @@
/*
* Copyright(c) 2012-2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/

#if defined(CAS_NVME_PARTIAL)

#include "cas_cache.h"
#include "utils_nvme.h"
#include "utils_blk.h"

/* NOTE(review): include targets (<...>) lost in transcription — restore. */
#include
#include


/* Query the NVMe namespace id of @bdev via NVME_IOCTL_ID.
 * Returns 0 and stores the nsid, or a negative error for non-NVMe devices. */
int cas_nvme_get_nsid(struct block_device *bdev, unsigned int *nsid)
{
	int ret = 0;

	/*
	 * Maximum NSID is 0xFFFFFFFF, so theoretically there is no free
	 * room for error code. However it's unlikely that there will ever
	 * be device with such number of namespaces, so we treat this value
	 * as it was signed. Then in case of negative value we interpret it
	 * as an error code. Moreover in case of error we can be sure, that
	 * we deal with non-NVMe device, because this ioctl should never
	 * fail with NVMe driver.
	 */
	ret = ioctl_by_bdev(bdev, NVME_IOCTL_ID, (unsigned long)NULL);
	if (ret < 0)
		return ret;

	*nsid = (unsigned int)ret;
	return 0;
}

/* CNS values for the Identify admin command (cdw10). */
#define NVME_ID_CNS_NS 0x00
#define NVME_ID_CNS_CTRL 0x01

/* Issue Identify-Namespace for @nsid through the block device ioctl path.
 * The result is copied out of a scratch user-space mapping into @ns.
 * Returns 0 on success or a negative error. */
int cas_nvme_identify_ns(struct block_device *bdev, unsigned int nsid,
		struct nvme_id_ns *ns)
{
	struct nvme_admin_cmd cmd = { };
	unsigned long __user buffer;
	int ret = 0;

	/* The ioctl expects a user-space data pointer, so map a scratch
	 * buffer instead of passing a kernel address. */
	buffer = cas_vm_mmap(NULL, 0, sizeof(*ns));
	if (IS_ERR((void *)buffer))
		return PTR_ERR((void *)buffer);

	cmd.opcode = nvme_admin_identify;
	cmd.nsid = cpu_to_le32(nsid);
	cmd.addr = (__u64)buffer;
	cmd.data_len = sizeof(*ns);
	cmd.cdw10 = NVME_ID_CNS_NS;
	ret = ioctl_by_bdev(bdev, NVME_IOCTL_ADMIN_CMD, (unsigned long)&cmd);
	if (ret < 0)
		goto out;

	ret = copy_from_user(ns, (void *)buffer, sizeof(*ns));
	if (ret > 0)
		ret = -EINVAL;
out:
	cas_vm_munmap(buffer, sizeof(*ns));
	return ret;
}

/* Identify namespace 1 via an NVMe *character* device file instead of a
 * block device. NOTE(review): "contorller" is a typo but it is the public
 * name; renaming requires a coordinated header change. */
int cas_nvme_identify_ns_contorller(struct file *file, struct nvme_id_ns *ns)
{
	struct nvme_admin_cmd cmd = { };
	unsigned long __user buffer;
	mm_segment_t old_fs;
	int ret = 0;

	buffer = cas_vm_mmap(NULL, 0, sizeof(*ns));
	if (IS_ERR((void *)buffer))
		return PTR_ERR((void *)buffer);

	cmd.opcode = nvme_admin_identify;
	cmd.nsid = 1;
	cmd.addr = (__u64)buffer;
	cmd.data_len = sizeof(*ns);
	cmd.cdw10 = NVME_ID_CNS_NS;

	/* Allow the ioctl to read the command struct from a kernel address. */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = file->f_op->unlocked_ioctl(file,
			NVME_IOCTL_ADMIN_CMD, (unsigned long)&cmd);
	set_fs(old_fs);
	if (ret < 0)
		goto out;

	ret = copy_from_user(ns, (void *)buffer, sizeof(*ns));
	if (ret > 0)
		ret = -EINVAL;
out:
	cas_vm_munmap(buffer, sizeof(*ns));
	return ret;
}

#if defined(CAS_NVME_FULL)

#define FORMAT_WORKAROUND_NOT_NEED 0
#define FORMAT_WORKAROUND_NEED 1

/* Decide whether the double-format firmware workaround is needed. */
static int __cas_nvme_check_fw(struct nvme_id_ctrl *id_ctrl)
{
	/*
	 * If firmware is older than 8DV101H0 we need do
	 * workaround - make format twice. We need to compare
	 * only 5 last characters.
+ */ + + return (strncmp(&id_ctrl->fr[3], "101H0", 5) < 0) ? + FORMAT_WORKAROUND_NEED : + FORMAT_WORKAROUND_NOT_NEED; +} + +int cas_nvme_identify_ctrl(struct block_device *bdev, + struct nvme_id_ctrl *id_ctrl) +{ + struct nvme_admin_cmd cmd = { }; + unsigned long __user buffer; + int ret = 0; + + buffer = cas_vm_mmap(NULL, 0, sizeof(*id_ctrl)); + if (IS_ERR((void *)buffer)) + return PTR_ERR((void *)buffer); + + cmd.opcode = nvme_admin_identify; + cmd.addr = (__u64)buffer; + cmd.data_len = sizeof(*id_ctrl); + cmd.cdw10 = NVME_ID_CNS_CTRL; + + ret = ioctl_by_bdev(bdev, NVME_IOCTL_ADMIN_CMD, (unsigned long)&cmd); + if (ret < 0) + goto out; + + ret = copy_from_user(id_ctrl, (void *)buffer, sizeof(*id_ctrl)); + if (ret > 0) + ret = -EINVAL; + +out: + cas_vm_munmap(buffer, sizeof(*id_ctrl)); + return ret; +} + +static int _cas_nvme_format_bdev(struct block_device *bdev, unsigned int nsid, + int lbaf, int ms) +{ + struct nvme_admin_cmd cmd = { }; + + cmd.opcode = nvme_admin_format_nvm; + cmd.nsid = nsid; + cmd.cdw10 = lbaf | ms<<4; + cmd.timeout_ms = 1200000; + return ioctl_by_bdev(bdev, NVME_IOCTL_ADMIN_CMD, (unsigned long)&cmd); +} + +static int _cas_nvme_controller_identify(struct file *character_device_file, + unsigned long __user buffer) +{ + struct nvme_admin_cmd cmd = { }; + mm_segment_t old_fs; + int ret; + + old_fs = get_fs(); + + cmd.opcode = nvme_admin_identify; + cmd.nsid = 0; + cmd.addr = (__u64)buffer; + /* 1 - identify contorller, 0 - identify namespace */ + cmd.cdw10 = 1; + cmd.data_len = 0x1000; + + set_fs(KERNEL_DS); + ret = character_device_file->f_op->unlocked_ioctl(character_device_file, + NVME_IOCTL_ADMIN_CMD, (unsigned long)&cmd); + set_fs(old_fs); + return ret; +} + +static int _cas_nvme_format_controller(struct file *character_device_file, + int lbaf, bool sbnsupp) +{ + struct nvme_admin_cmd cmd = { }; + mm_segment_t old_fs; + int ret; + + old_fs = get_fs(); + + /* Send format command to device */ + cmd.opcode = nvme_admin_format_nvm; + cmd.nsid = 
0xFFFFFFFF; + cmd.cdw10 = lbaf | sbnsupp << 4; + cmd.timeout_ms = 120000; + cmd.addr = 0; + + set_fs(KERNEL_DS); + ret = character_device_file->f_op->unlocked_ioctl(character_device_file, + NVME_IOCTL_ADMIN_CMD, (unsigned long)&cmd); + set_fs(old_fs); + return ret; +} + +static inline int find_lbaf(struct nvme_lbaf *lbaf, int cnt, int atomic) +{ + int ms = atomic ? 8 : 0; + int i; + + for (i = 0; i <= cnt; ++i) + if (lbaf[i].ms == ms && lbaf[i].ds == 9) + return i; + + return -EINVAL; +} + +/* context for async probe */ +struct _probe_context +{ + struct completion cmpl; + struct ocf_metadata_probe_status status; + int error; +}; + +static void _cas_nvme_probe_cmpl(void *priv, int error, + struct ocf_metadata_probe_status *status) +{ + struct _probe_context *ctx = (struct _probe_context*)priv; + + ctx->error = error; + if (!error) { + ctx->status = *status; + } + + complete(&ctx->cmpl); +} + +static int _cas_nvme_preformat_check(struct block_device *bdev, int force) +{ + ocf_volume_t volume; + struct _probe_context probe_ctx; + int ret = 0; + + if (bdev != bdev->bd_contains) + return -KCAS_ERR_A_PART; + + if (cas_blk_get_part_count(bdev) > 1 && !force) + return -KCAS_ERR_CONTAINS_PART; + + ret = cas_blk_open_volume_by_bdev(&volume, bdev); + if (ret == -KCAS_ERR_NVME_BAD_FORMAT) { + /* Current format is not supported by CAS, so we can be sure + * that there is no dirty data. 
Do format
		 */
		return 0;
	} else if (ret) {
		/* An error occurred, stop processing */
		return ret;
	}

	/* Probe for existing CAS metadata; wait synchronously for result. */
	init_completion(&probe_ctx.cmpl);
	ocf_metadata_probe(cas_ctx, volume, _cas_nvme_probe_cmpl, &probe_ctx);
	if (wait_for_completion_interruptible(&probe_ctx.cmpl)) {
		ocf_volume_close(volume);
		return -OCF_ERR_FLUSHING_INTERRUPTED;
	}

	if (probe_ctx.error == -ENODATA) {
		/* Cache was not detected on this device.
		 * NVMe can be formatted.
		 */
		ret = 0;
	} else if (probe_ctx.error == -EBUSY) {
		ret = -OCF_ERR_NOT_OPEN_EXC;
	} else if (probe_ctx.error) {
		/* Some error occurred; we cannot be sure the cache is clean. */
		ret = -KCAS_ERR_FORMAT_FAILED;
	} else {
		/* Check if cache was closed in proper way */
		if (!probe_ctx.status.clean_shutdown ||
				probe_ctx.status.cache_dirty) {
			/* Dirty shutdown */
			ret = -KCAS_ERR_DIRTY_EXISTS_NVME;
		}

		if (force) {
			/* Force overwrites dirty shutdown */
			ret = 0;
		}
	}

	ocf_volume_close(volume);
	return ret;
}

/* Format a single NVMe namespace given its block-device path: open it
 * exclusively, run preformat safety checks, pick the best LBA format for
 * @metadata_mode, apply the old-firmware double-format workaround if
 * needed, format, and re-read the partition table. */
static int _cas_nvme_format_namespace_by_path(const char *device_path,
		int metadata_mode, int force)
{
	struct nvme_id_ns *ns;
	struct nvme_id_ctrl *id;

	unsigned int nsid, sbnsupp = 0;
	int best_lbaf = 0;
	int ret = 0;
	struct block_device *bdev;
	char holder[] = "CAS FORMAT\n";

	ns = kmalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return -OCF_ERR_NO_MEM;

	id = kmalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		ret = -OCF_ERR_NO_MEM;
		goto out1;
	}

	bdev = OPEN_BDEV_EXCLUSIVE(device_path,
			FMODE_READ | FMODE_WRITE | FMODE_EXCL, holder);
	if (IS_ERR(bdev)) {
		if (PTR_ERR(bdev) == -EBUSY)
			ret = -OCF_ERR_NOT_OPEN_EXC;
		else
			ret = -OCF_ERR_INVAL_VOLUME_TYPE;

		goto out1;
	}

	ret = cas_nvme_get_nsid(bdev, &nsid);
	if (ret < 0) {
		ret = -KCAS_ERR_NOT_NVME;
		goto out2;
	}

	ret = _cas_nvme_preformat_check(bdev, force);
	if (ret)
		goto out2;

	ret = cas_nvme_identify_ns(bdev, nsid, ns);
	if (ret < 0) {
		ret = -KCAS_ERR_FORMAT_FAILED;
		goto out2;
	}

	if (metadata_mode == CAS_METADATA_MODE_NORMAL) {
		best_lbaf = find_lbaf(ns->lbaf, ns->nlbaf, 0);
		sbnsupp = 0;
	} else if (metadata_mode == CAS_METADATA_MODE_ATOMIC) {
		best_lbaf = find_lbaf(ns->lbaf, ns->nlbaf, 1);
		/* sbnsupp: set when metadata as separate buffer is NOT
		 * supported (mc bit 1 clear). */
		sbnsupp = !(ns->mc & (1<<1));
	}

	if (best_lbaf < 0) {
		ret = -KCAS_ERR_FORMAT_FAILED;
		goto out2;
	}

	ret = cas_nvme_identify_ctrl(bdev, id);
	if (ret < 0) {
		ret = -KCAS_ERR_FORMAT_FAILED;
		goto out2;
	}

	if (__cas_nvme_check_fw(id) == FORMAT_WORKAROUND_NEED) {
		/*
		 * If firmware is older than 8DV101H0 we need do
		 * workaround - make format twice.
		 */
		ret = _cas_nvme_format_bdev(bdev, nsid, best_lbaf, sbnsupp);
		if (ret)
			goto out2;
	}

	ret = _cas_nvme_format_bdev(bdev, nsid, best_lbaf, sbnsupp);
	if (ret)
		goto out2;

	/* Ask the kernel to re-read the (now empty) partition table. */
	ret = ioctl_by_bdev(bdev, BLKRRPART, (unsigned long)NULL);
out2:
	CLOSE_BDEV_EXCLUSIVE(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
out1:
	kfree(id);
	kfree(ns);
	return ret;
}

/* Resolve namespace @namespace_number of the NVMe controller char device
 * (@major:@minor) to an exclusively-opened block device via sysfs.
 * On success stores the bdev in @bdev and returns 0. */
static int _cas_nvme_get_bdev_from_controller(struct block_device **bdev,
		int major, int minor, int namespace_number)
{
	mm_segment_t old_fs;
	char *sys_path;
	struct file *file;
	char readbuffer[12] = {0};
	char holder[] = "CAS FORMAT\n";
	int ret = 0;

	sys_path = kzalloc(sizeof(char)*MAX_STR_LEN, GFP_KERNEL);
	if (!sys_path)
		return -OCF_ERR_NO_MEM;

	/* /sys/dev/char/<maj>:<min>/nvme<min>n<ns>/dev holds "maj:min" of
	 * the namespace block device. */
	sprintf(sys_path, "/sys/dev/char/%d:%d/nvme%dn%d/dev",
			major, minor, minor, namespace_number);

	file = filp_open(sys_path, O_RDONLY, 0);
	kfree(sys_path);
	if (IS_ERR(file))
		return -KCAS_ERR_FORMAT_FAILED;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = file->f_op->read(file, readbuffer, sizeof(readbuffer),
			&file->f_pos);
	set_fs(old_fs);
	filp_close(file, 0);
	if (ret < 0)
		return -KCAS_ERR_FORMAT_FAILED;

	ret = sscanf(readbuffer, "%d:%d", &major, &minor);
	if (ret < 0)
		return -KCAS_ERR_FORMAT_FAILED;

	*bdev = blkdev_get_by_dev(MKDEV(major, minor),
			FMODE_READ | FMODE_WRITE | FMODE_EXCL, holder);
	if
(IS_ERR(*bdev)) + return -OCF_ERR_INVAL_VOLUME_TYPE; + + return 0; +} + +static int _cas_nvme_format_character_device(const char *device_path, + int metadata_mode, int force) +{ + mm_segment_t old_fs; + int ret; + struct file *character_device_file = NULL; + struct nvme_id_ctrl *ctrl; + unsigned long __user buffer; + struct kstat *stat; + struct block_device **ndev = NULL; + int i; + struct nvme_id_ns *ns; + int best_lbaf = 0; + int sbnsupp = 0; + + ctrl = kzalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL); + buffer = cas_vm_mmap(NULL, 0, sizeof(*ctrl)); + stat = kmalloc(sizeof(struct kstat), GFP_KERNEL); + ns = kmalloc(sizeof(*ns), GFP_KERNEL); + + old_fs = get_fs(); + + if (!ctrl || !buffer || !stat || !ns) { + ret = -OCF_ERR_NO_MEM; + goto out1; + } + + character_device_file = filp_open(device_path, O_RDWR | O_EXCL, 0); + if (IS_ERR(character_device_file)) { + ret = -OCF_ERR_INVAL_VOLUME_TYPE; + goto out1; + } + + ret = _cas_nvme_controller_identify(character_device_file, buffer); + if (ret < 0) { + ret = KCAS_ERR_FORMAT_FAILED; + goto out1; + } + + ret = copy_from_user(ctrl, (void *)buffer, sizeof(*ctrl)); + if (ret) + goto out1; + + ndev = kmalloc_array(ctrl->nn, sizeof(struct block_device), GFP_KERNEL); + if (!ndev) { + ret = -OCF_ERR_NO_MEM; + goto out1; + } + + set_fs(KERNEL_DS); + ret = vfs_stat(device_path, stat); + set_fs(old_fs); + if (ret) + goto out1; + + for (i = 1; i <= ctrl->nn; i++) { + ret = _cas_nvme_get_bdev_from_controller(&ndev[i-1], + MAJOR(stat->rdev), MINOR(stat->rdev), i); + if (ret) { + i--; + goto cleanup; + } + + ret = _cas_nvme_preformat_check(ndev[i-1], force); + if (ret) + goto cleanup; + } + + ret = cas_nvme_identify_ns_contorller(character_device_file, ns); + if (ret) + goto cleanup; + + if (metadata_mode == CAS_METADATA_MODE_NORMAL) { + best_lbaf = find_lbaf(ns->lbaf, ns->nlbaf, 0); + sbnsupp = 0; + } else if (metadata_mode == CAS_METADATA_MODE_ATOMIC) { + best_lbaf = find_lbaf(ns->lbaf, ns->nlbaf, 1); + sbnsupp = !(ns->mc & 
(1<<1)); + } + + if (best_lbaf < 0) { + ret = -KCAS_ERR_FORMAT_FAILED; + goto cleanup; + } + + if (__cas_nvme_check_fw(ctrl) == FORMAT_WORKAROUND_NEED) { + /* + * If firmware is older then 8DV101H0 we need do + * workaround - make format twice. + */ + ret = _cas_nvme_format_controller(character_device_file, + best_lbaf, sbnsupp); + if (ret < 0) { + ret = -KCAS_ERR_FORMAT_FAILED; + goto cleanup; + } + } + + ret = _cas_nvme_format_controller(character_device_file, + best_lbaf, sbnsupp); + if (ret < 0) + ret = -KCAS_ERR_FORMAT_FAILED; + +cleanup: + for (i = i-1; i >= 1; i--) { + ret |= ioctl_by_bdev(ndev[i-1], BLKRRPART, (unsigned long)NULL); + blkdev_put(ndev[i-1], FMODE_READ | FMODE_WRITE | FMODE_EXCL); + } + +out1: + kfree(ndev); + kfree(ctrl); + kfree(stat); + + kfree(ns); + cas_vm_munmap(buffer, sizeof(buffer)); + filp_close(character_device_file, 0); + + return ret; +} + +int cas_nvme_format_optimal(const char *device_path, int metadata_mode, + int force) +{ + int ret; + uint8_t type; + + ret = cas_blk_identify_type(device_path, &type); + if (ret == -OCF_ERR_INVAL_VOLUME_TYPE) { + /* An error occurred, stop processing */ + return ret; + } + + if (type == BLOCK_DEVICE_VOLUME || type == ATOMIC_DEVICE_VOLUME) { + ret = _cas_nvme_format_namespace_by_path(device_path, + metadata_mode, force); + } else if (type == NVME_CONTROLLER && false) { + /* + * TODO(rbaldyga): Make it safe with NVMe drives that do not + * handle format change properly. 
		 */
		ret = _cas_nvme_format_character_device(device_path,
				metadata_mode, force);
	} else {
		ret = -OCF_ERR_INVAL_VOLUME_TYPE;
	}

	return ret;
}

#endif

#endif
diff --git a/modules/cas_cache/utils/utils_nvme.h b/modules/cas_cache/utils/utils_nvme.h new file mode 100644 index 000000000..a0f7a8f4f --- /dev/null +++ b/modules/cas_cache/utils/utils_nvme.h @@ -0,0 +1,38 @@
/*
* Copyright(c) 2012-2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/

#ifndef UTILS_NVME_H_
#define UTILS_NVME_H_

/* NOTE(review): include targets (<...>) were lost in transcription of this
 * patch — restore the original headers below. */
#if defined(CAS_UAPI_NVME)
#include
#endif

#if defined(CAS_UAPI_LINUX_NVME)
#include
#endif

#if defined(CAS_UAPI_LINUX_NVME_IOCTL)
#include
#endif

#if defined(CAS_NVME_PARTIAL)

#include

int cas_nvme_get_nsid(struct block_device *bdev, unsigned int *nsid);
int cas_nvme_identify_ns(struct block_device *bdev, unsigned int nsid,
		struct nvme_id_ns *ns);

#if defined(CAS_NVME_FULL)

int cas_nvme_format_optimal(const char *device_path, int metadata_mode,
		int force);

#endif /* CAS_NVME_FULL */

#endif /* CAS_NVME_PARTIAL */

#endif /* UTILS_NVME_H_ */
diff --git a/modules/cas_cache/utils/utils_properties.c b/modules/cas_cache/utils/utils_properties.c new file mode 100644 index 000000000..111868b24 --- /dev/null +++ b/modules/cas_cache/utils/utils_properties.c @@ -0,0 +1,769 @@
/*
* Copyright(c) 2012-2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/

#include "cas_cache.h"

#define INTERNAL_CALL 0
#define EXTERNAL_CALL 1

#define CAS_PROPERTIES_VERSION 101

#define VERSION_STR ".version"

/*
 * Difference between constant and non constant entry is stored in LSB
 * e.g.:
 * cas_property_string in binary 0000 1010
 * cas_property_string_const in binary 0000 1011
 */

#define CAS_PROP_UNCONST(type) (type & ~CAS_PROPERTIES_CONST)
#define CAS_PROP_CHECK_CONST(type) (type & CAS_PROPERTIES_CONST)

enum cas_property_type {
	cas_property_string = 10,
	cas_property_string_const =
			(cas_property_string | CAS_PROPERTIES_CONST),
	cas_property_sint = 16,
	cas_property_sint_const = (cas_property_sint | CAS_PROPERTIES_CONST),
	cas_property_uint = 74,
	cas_property_uint_const = (cas_property_uint | CAS_PROPERTIES_CONST),
};

/* A property set is simply a list head; entries are _cas_property. */
struct cas_properties {
	struct list_head list;
};

/* One key/value entry; value storage depends on 'type'. */
struct _cas_property {
	uint8_t type;
	char *key;
	struct list_head item;
	union {
		void *value;		/* cas_property_string: owned string */
		uint64_t value_uint;
		int64_t value_sint;
	};
};

/* Allocate an empty property set pre-populated with the constant
 * ".version" entry and a mutable ".size" entry.
 * Returns the set or an ERR_PTR on failure. */
struct cas_properties *cas_properties_create(void)
{
	struct cas_properties *props;
	int result;

	props = kzalloc(sizeof(*props), GFP_KERNEL);
	if (!props)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&props->list);

	result = cas_properties_add_uint(props, VERSION_STR,
			CAS_PROPERTIES_VERSION, CAS_PROPERTIES_CONST);
	if (result) {
		kfree(props);
		return ERR_PTR(result);
	}

	result = cas_properties_add_uint(props, ".size", 0,
			CAS_PROPERTIES_NON_CONST);
	if (result) {
		kfree(props);
		return ERR_PTR(result);
	}

	return props;
}

/* Free every entry (including owned string values and keys) and the set
 * itself. */
void cas_properties_destroy(struct cas_properties *props)
{
	struct list_head *curr, *tmp;
	struct _cas_property *entry;

	list_for_each_safe(curr, tmp, &props->list) {
		entry = list_entry(curr, struct _cas_property, item);
		list_del(curr);
		/* Only string entries own their heap value. */
		if (cas_property_string == CAS_PROP_UNCONST(entry->type))
			kfree(entry->value);
		kfree(entry->key);
		kfree(entry);
	}

	kfree(props);
}

/* Compute the serialized size of all entries: key (NUL-terminated) + type
 * byte + value (string incl. NUL, or fixed-width integer).
 * Returns 0 on an unknown entry type. */
static uint64_t _cas_prop_get_size(struct cas_properties *props)
{
	struct list_head *curr;
	struct _cas_property *entry;
	uint64_t size_all = 0;

	list_for_each(curr, &props->list) {
		entry = list_entry(curr, struct _cas_property, item);

		size_all += cas_prop_strnlen(entry->key, MAX_STRING_SIZE) + 1;
		size_all += sizeof(entry->type);

		switch (CAS_PROP_UNCONST(entry->type)) {
		case cas_property_string:
			size_all += cas_prop_strnlen(entry->value,
					MAX_STRING_SIZE) + 1;
			break;
		case cas_property_sint:
			size_all +=
sizeof(entry->value_sint); + break; + case cas_property_uint: + size_all += sizeof(entry->value_uint); + break; + default: + return 0; + } + } + + return size_all; +} + +static int _cas_prop_serialize_string(char *buffer, const uint64_t size, + uint64_t *offset, char *value) +{ + uint64_t str_size = 0; + + str_size = cas_prop_strnlen(value, MAX_STRING_SIZE) + 1; + + if ((*offset + str_size) > size) + return -ENOMEM; + + memcpy(buffer + *offset, value, str_size); + *offset += str_size; + + return 0; +} + +static int _cas_prop_parse_string(const char *buffer, const uint64_t size, + uint64_t *offset, char **str) +{ + char *tmp_str = NULL; + uint64_t str_size = 0; + + if (*offset >= size) + return -ENOMEM; + + str_size = cas_prop_strnlen(&buffer[*offset], size - *offset ) + 1; + + if (str_size > size - *offset) { + /* no null terminator at the end of buffer */ + return -ENOMEM; + } + + tmp_str = kstrdup(&buffer[*offset], GFP_KERNEL); + if (!tmp_str) + return -ENOMEM; + + *offset += str_size; + *str = tmp_str; + + return 0; +} + +static int _cas_prop_serialize_int(char *buffer, const uint64_t size, + uint64_t *offset, uint64_t number) +{ + int32_t i; + + /* + * To prevent issue connected with byte order we + * serialize integer byte by byte. 
+ */ + for (i = 0; i < sizeof(number); i++) { + char byte = number & 0xFF; + + if (*offset < size) + buffer[*offset] = byte; + else + return -ENOMEM; + + (*offset)++; + number = number >> 8; + } + + return 0; +} + +static int _cas_prop_serialize_uint(char *buffer, const uint64_t size, + uint64_t *offset, uint64_t number) +{ + return _cas_prop_serialize_int(buffer, size, offset, number); +} + + +static int _cas_prop_serialize_sint(char *buffer, const uint64_t size, + uint64_t *offset, int64_t number) +{ + return _cas_prop_serialize_int(buffer, size, offset, (uint64_t) number); + +} + +static int _cas_prop_parse_int(const char *buffer, + const uint64_t size, uint64_t *offset, uint64_t *number) +{ + int32_t i; + uint64_t byte; + + *number = 0; + + /* + * To prevent issue connected with byte order we + * parse integer byte by byte. + */ + for (i = 0; i < sizeof(*number); i++) { + if (*offset >= size) + return -ENOMEM; + + byte = buffer[*offset] & 0xFF; + byte = byte << (i * 8); + + *number |= byte; + + (*offset)++; + } + + return 0; +} + +static int _cas_prop_parse_uint(const char *buffer, + const uint64_t size, uint64_t *offset, uint64_t *number) +{ + return _cas_prop_parse_int(buffer, size, offset, number); +} + +static int _cas_prop_parse_sint(const char *buffer, + const uint64_t size, uint64_t *offset, int64_t *number) +{ + return _cas_prop_parse_int(buffer, size, offset, (uint64_t *) number); +} + +static int _cas_prop_serialize(struct _cas_property *entry, void *buffer, + const uint64_t size, uint64_t *offset) +{ + uint64_t item_size = 0; + void *item; + int result = 0; + + if (*offset > size) + return -ENOMEM; + + /* + * Each entry is represented in buffer in order as below + * (e.g. 
in case we have entry with integer) : + * <----- entry -----> + * <- key -><-type-><- integer -> + * <- X bytes -><1 byte><- 8 byte -> + * | | | | + */ + + /* + * First step - serialize key + */ + + item_size = cas_prop_strnlen(entry->key, MAX_STRING_SIZE) + 1; + item = entry->key; + + if ((*offset + item_size) > size) + return -ENOMEM; + + memcpy(buffer + *offset, item, item_size); + *offset += item_size; + + /* + * Second step - serialize type + */ + + item_size = sizeof(entry->type); + item = &entry->type; + + if ((*offset + item_size) > size) + return -ENOMEM; + + memcpy(buffer + *offset, item, item_size); + *offset += item_size; + + /* + * Third step - serialize value + */ + + switch (CAS_PROP_UNCONST(entry->type)) { + case cas_property_string: + /* Serialize string */ + result = _cas_prop_serialize_string(buffer, size, offset, + entry->value); + break; + case cas_property_sint: + /* Serialize signed integer */ + result = _cas_prop_serialize_sint(buffer, size, offset, + entry->value_uint); + break; + case cas_property_uint: + /* Serialize unsigned integer */ + result = _cas_prop_serialize_uint(buffer, size, offset, + entry->value_uint); + break; + default: + result = -EINVAL; + break; + } + + return result; +} + +int cas_properties_serialize(struct cas_properties *props, + struct casdsk_props_conf *caches_serialized_conf) +{ + int result = 0; + uint64_t offset = 0, size; + uint16_t crc = 0; + void *buffer; + struct list_head *curr; + struct _cas_property *entry; + + size = _cas_prop_get_size(props); + if (size == 0) + return -EINVAL; + + buffer = vzalloc(size); + if (!buffer) + return -ENOMEM; + + /* + * Update first entry on list - size of buffer + */ + result = cas_properties_add_uint(props, ".size", size, + CAS_PROPERTIES_CONST); + if (result) + goto error_after_buffer_allocation; + + /* + * Serialize each entry, one by one + */ + list_for_each(curr, &props->list) { + entry = list_entry(curr, struct _cas_property, item); + result = 
_cas_prop_serialize(entry, buffer, size, &offset); + if (result) + goto error_after_buffer_allocation; + } + + crc = crc16(0, buffer, size); + + caches_serialized_conf->buffer = buffer; + caches_serialized_conf->size = size; + caches_serialized_conf->crc = crc; + return result; + +error_after_buffer_allocation: + vfree(buffer); + return result; +} + +void cas_properties_print(struct cas_properties *props) +{ + int result = 0; + struct list_head *curr; + struct _cas_property *entry; + char *abc; + + /* + * Serialize each entry, one by one + */ + list_for_each(curr, &props->list) { + entry = list_entry(curr, struct _cas_property, item); + printk(KERN_DEBUG "[Upgrade] Key: %s", entry->key); + switch (CAS_PROP_UNCONST(entry->type)) { + case cas_property_string: + printk(", string, "); + abc = (char *)entry->value; + printk("Value: %s ", abc); + break; + case cas_property_sint: + break; + case cas_property_uint: + printk(", uint, "); + printk("Value: %llu ", entry->value_uint); + default: + result = -EINVAL; + break; + } + printk("\n"); + } +} + +static int _cas_prop_parse_version(const char *buffer, uint64_t *offset, + uint64_t *version, int trigger) +{ + int result = 0; + char *key = NULL; + uint8_t type; + + result = _cas_prop_parse_string(buffer, strlen(VERSION_STR) + 1, + offset, &key); + if (result) + goto error_during_parse_key; + + if (strcmp(VERSION_STR, key)) { + result = -EINVAL; + goto error_after_parse_key; + } + + type = buffer[*offset]; + if (cas_property_uint_const != type) { + result = -EINVAL; + goto error_after_parse_key; + } + *offset += sizeof(type); + + result = _cas_prop_parse_uint(buffer, + strlen(VERSION_STR) + 1 + sizeof(type) + + sizeof(*version), offset, version); + if (result) + goto error_after_parse_key; + + /* + * In case that is external call + * we don't need check version. 
+ */ + if (trigger == INTERNAL_CALL && *version != CAS_PROPERTIES_VERSION) { + printk(KERN_ERR "Version of interface using to parse is " + "different than version used to serialize\n"); + result = -EPERM; + } + +error_after_parse_key: + kfree(key); +error_during_parse_key: + return result; +} + +int cas_properites_parse_version(struct casdsk_props_conf *caches_serialized_conf, + uint64_t *version) +{ + uint64_t offset = 0; + char *buffer = NULL; + + buffer = (char *) caches_serialized_conf->buffer; + if (!buffer) + return -EINVAL; + + return _cas_prop_parse_version(buffer, &offset, version, EXTERNAL_CALL); +} + +struct cas_properties * +cas_properites_parse(struct casdsk_props_conf *caches_serialized_conf) +{ + struct cas_properties *props; + char *key = NULL, *value = NULL, *buffer = NULL; + int result; + uint8_t type; + uint64_t uint_value, size = 0, offset = 0, version = 0; + uint16_t crc; + int64_t sint_value; + bool constant = false; + + props = cas_properties_create(); + if (IS_ERR(props)) + return ERR_PTR(-ENOMEM); + + if (!caches_serialized_conf) { + result = -EINVAL; + goto error_after_props_allocation; + } + + buffer = (char *) caches_serialized_conf->buffer; + if (!buffer) { + result = -EINVAL; + goto error_after_props_allocation; + } + + size = caches_serialized_conf->size; + crc = crc16(0, buffer, size); + if (crc != caches_serialized_conf->crc) { + printk(KERN_ERR "Cache configuration corrupted"); + result = -EINVAL; + goto error_after_props_allocation; + } + + /* + * Parse first entry on list - version of interface used to + * serialization + */ + result = _cas_prop_parse_version(buffer, &offset, &version, + INTERNAL_CALL); + if (result) + goto error_after_props_allocation; + + while (offset < size) { + /* + * Parse key of entry + */ + result = _cas_prop_parse_string(buffer, size, &offset, &key); + if (result) + goto error_after_props_allocation; + + /* + * Parse type of entry + */ + if (offset + sizeof(type) > size) { + kfree(key); + goto 
error_after_props_allocation; + } + + memcpy(&type, buffer + offset, sizeof(type)); + offset += sizeof(type); + + constant = CAS_PROP_CHECK_CONST(type); + type = CAS_PROP_UNCONST(type); + + switch (type) { + case cas_property_string: + /* Parse string */ + result = _cas_prop_parse_string(buffer, size, &offset, + &value); + if (result) + break; + + /* + * Add new entry with string to CAS properties instance + */ + result |= cas_properties_add_string(props, key, value, + constant); + kfree(value); + break; + case cas_property_sint: + /* Parse signed integer */ + result = _cas_prop_parse_sint(buffer, size, &offset, + &sint_value); + /* Add new entry with signed integer to CAS properties + * instance + */ + result |= cas_properties_add_sint(props, key, + sint_value, constant); + break; + case cas_property_uint: + /* Parse unsigned integer */ + result = _cas_prop_parse_uint(buffer, size, &offset, + &uint_value); + /* Add new entry with unsigned integer to CAS properties + * instance + */ + result |= cas_properties_add_uint(props, key, + uint_value, constant); + break; + default: + result = -EINVAL; + break; + } + + /* + * In case when we added new entry, + * we not need hold key value longer. 
+ */ + kfree(key); + + if (result) + goto error_after_props_allocation; + } + + return props; + +error_after_props_allocation: + cas_properties_destroy(props); + return ERR_PTR(result); +} + +static struct _cas_property *_cas_prop_find(const struct cas_properties *props, + const char *key) +{ + struct list_head *curr; + struct _cas_property *entry; + + list_for_each(curr, &props->list) { + entry = list_entry(curr, struct _cas_property, item); + if (strncmp(key, entry->key, MAX_STRING_SIZE) == 0) + return entry; + } + return ERR_PTR(-ENOENT); +} + +static struct _cas_property *_cas_prop_alloc_entry_key(const char *key) +{ + struct _cas_property *entry; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return ERR_PTR(-ENOMEM); + + entry->key = kstrdup(key, GFP_KERNEL); + if (!entry->key) { + kfree(entry); + return ERR_PTR(-ENOMEM); + } + + INIT_LIST_HEAD(&entry->item); + + return entry; +} + +/* + * ADD + */ + +int cas_properties_add_uint(struct cas_properties *props, const char *key, + uint64_t value, bool constant) +{ + struct _cas_property *entry; + + /* + * Looks for entry with same key, + * if it is exist - update, if not - create new + */ + entry = _cas_prop_find(props, key); + if (IS_ERR(entry)) { + entry = _cas_prop_alloc_entry_key(key); + if (IS_ERR(entry)) + return PTR_ERR(entry); + list_add_tail(&entry->item, &props->list); + } else if (cas_property_uint != entry->type) { + /* + * We can update only non constant entry, + * so we need compare type only with non constant type. + */ + return -EINVAL; + } + + entry->type = constant ? 
cas_property_uint_const : cas_property_uint; + entry->value_uint = value; + + return 0; +} + +int cas_properties_add_sint(struct cas_properties *props, const char *key, + int64_t value, bool constant) +{ + struct _cas_property *entry; + + /* + * Looks for entry with same key, + * if it is exist - update, if not - create new + */ + entry = _cas_prop_find(props, key); + if (IS_ERR(entry)) { + entry = _cas_prop_alloc_entry_key(key); + if (IS_ERR(entry)) + return PTR_ERR(entry); + list_add_tail(&entry->item, &props->list); + } else if (cas_property_sint != entry->type) { + /* + * We can update only non constant entry, + * so we need compare type only with non constant type. + */ + return -EINVAL; + } + + entry->type = constant ? cas_property_sint_const : cas_property_sint; + entry->value_sint = value; + + return 0; +} + +int cas_properties_add_string(struct cas_properties *props, const char *key, + const char *value, bool constant) +{ + struct _cas_property *entry; + char *tmp_value = NULL; + + tmp_value = kstrdup(value, GFP_KERNEL); + if (!tmp_value) + return -ENOMEM; + + /* + * Looks for entry with same key, + * if it is exist - update, if not - create new + */ + entry = _cas_prop_find(props, key); + if (IS_ERR(entry)) { + entry = _cas_prop_alloc_entry_key(key); + if (IS_ERR(entry)) { + kfree(tmp_value); + return PTR_ERR(entry); + } + list_add_tail(&entry->item, &props->list); + } else { + if (cas_property_string != entry->type) { + /* + * We can update only non constant entry, + * so we need compare type only with non constant type. + */ + kfree(tmp_value); + return -EINVAL; + } + + kfree(entry->value); + } + + entry->type = constant ? 
cas_property_string_const : + cas_property_string; + entry->value = tmp_value; + + return 0; +} + +/* + * GET + */ + +int cas_properties_get_uint(struct cas_properties *props, const char *key, + uint64_t *value) +{ + struct _cas_property *entry; + + entry = _cas_prop_find(props, key); + if ((IS_ERR(entry) == 0) && (cas_property_uint == + CAS_PROP_UNCONST(entry->type))) { + *value = entry->value_uint; + return 0; + } + + return IS_ERR(entry) ? PTR_ERR(entry) : -EINVAL; +} + +int cas_properties_get_sint(struct cas_properties *props, const char *key, + int64_t *value) +{ + struct _cas_property *entry; + + entry = _cas_prop_find(props, key); + if ((IS_ERR(entry) == 0) && (cas_property_sint == + CAS_PROP_UNCONST(entry->type))) { + *value = entry->value_sint; + return 0; + } + + return IS_ERR(entry) ? PTR_ERR(entry) : -EINVAL; +} + +int cas_properties_get_string(struct cas_properties *props, const char *key, + char *value, uint32_t size) +{ + struct _cas_property *entry; + + entry = _cas_prop_find(props, key); + if ((IS_ERR(entry) == 0) && (cas_property_string == + CAS_PROP_UNCONST(entry->type))) { + /* Check if size of destination memory is enough */ + if (size < cas_prop_strnlen(entry->value, MAX_STRING_SIZE) + 1) + return -ENOMEM; + + cas_prop_strncpy(value, size, entry->value, + cas_prop_strnlen(entry->value, MAX_STRING_SIZE)); + return 0; + } + + return IS_ERR(entry) ? 
PTR_ERR(entry) : -EINVAL; +} diff --git a/modules/cas_cache/utils/utils_properties.h b/modules/cas_cache/utils/utils_properties.h new file mode 100644 index 000000000..9bb7d1840 --- /dev/null +++ b/modules/cas_cache/utils/utils_properties.h @@ -0,0 +1,153 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef UTILS_PROPERTIES_H_ +#define UTILS_PROPERTIES_H_ + +#ifdef __KERNEL__ +#define cas_prop_strncpy(dest, dest_size, src, src_size) \ + strlcpy(dest, src, dest_size) +#define cas_prop_strnlen(string, size) strnlen(string, size) +#else +#define cas_prop_strncpy(dest, dest_size, src, src_size) \ + strncpy(dest, src, src_size) +#define cas_prop_strnlen(string, size) strlen(string) +#endif + +#include "../../cas_disk/cas_disk.h" + +#define MAX_STRING_SIZE 4095 + +#define CAS_PROPERTIES_NON_CONST false +#define CAS_PROPERTIES_CONST true + +/** + * @file utils_properties.h + * @brief CAS cache interface for collect and serialization CAS properties + */ + +/** + * @brief Handler for instance of CAS properties + */ +struct cas_properties; + +/** + * @brief Initialize instance of CAS properties + * + * @return Handler to instance of interface + */ +struct cas_properties *cas_properties_create(void); + +/** + * @brief De-initialize instance of CAS properties + * + * @param props Handler to instance to de-initialize + */ +void cas_properties_destroy(struct cas_properties *props); + +/** + * @brief Serialize given CAS properties instance to continuous buffer + * + * @param props instance of CAS properties + * @param idisk conf instance of CAS properties + * @return result of serialize CAS properties + */ +int cas_properties_serialize(struct cas_properties *props, + struct casdsk_props_conf *caches_serialized_conf); + +/** + * @brief Parse of first entry given continuous buffer to get version of + * interface which been used to serialize + * + * @param buffer pointer to continuous buffer with serialized CAS properties + 
* @param version pointer to memory where we will put version + * @return result of getting version, 0 success + */ +int cas_properites_parse_version(struct casdsk_props_conf *caches_serialized_conf, + uint64_t *version); + +/** + * @brief Parse of given continuous buffer to CAS properties instance + * + * @param buffer pointer to continuous buffer with serialized CAS properties + * @return handler to CAS properties instance + */ +struct cas_properties * +cas_properites_parse(struct casdsk_props_conf *caches_serialized_conf); + +/** + * @brief Add unsigned integer to CAS properties instance + * + * @param props CAS properties instance to add variable + * @param key key paired with variable + * @param value value of variable + * @param private if true value cannot be updated + * @return result of adding 0 success + */ +int cas_properties_add_uint(struct cas_properties *props, const char *key, + uint64_t value, bool private); + +/** + * @brief Add signed integer to CAS properties instance + * + * @param props CAS properties instance to add variable + * @param key key paired with variable + * @param value value of variable + * @param private if true value cannot be updated + * @return result of adding 0 success + */ +int cas_properties_add_sint(struct cas_properties *props, const char *key, + int64_t value, bool private); + +/** + * @brief Add string to CAS properties instance + * + * @param props CAS properties instance to add variable + * @param key key paired with variable + * @param value value of variable + * @param private if true value cannot be updated + * @return result of adding 0 success + */ +int cas_properties_add_string(struct cas_properties *props, const char *key, + const char *value, bool private); + +/** + * @brief Get unsigned integer to CAS properties instance + * + * @param props CAS properties instance to add variable + * @param key key paired with variable + * @param value pointer to memory where we will put value + * @return result of getting 0 
success + */ +int cas_properties_get_uint(struct cas_properties *props, const char *key, + uint64_t *value); + +/** + * @brief Get signed integer to CAS properties instance + * + * @param props CAS properties instance to add variable + * @param key key paired with variable + * @param value pointer to memory where we will put value + * @return result of getting 0 success + */ +int cas_properties_get_sint(struct cas_properties *props, const char *key, + int64_t *value); + +/** + * @brief Get string integer to CAS properties instance + * + * @param props CAS properties instance to add variable + * @param key key paired with variable + * @param value pointer to memory where we will put value + * @param size size of destination memory + * @return result of getting 0 success, 1 error, 2 not enough space + * in destination + */ +int cas_properties_get_string(struct cas_properties *props, const char *key, + char *value, uint32_t size); + + +void cas_properties_print(struct cas_properties *props); +#endif /* UTILS_PROPERTIES_H_ */ diff --git a/modules/cas_cache/utils/utils_rpool.c b/modules/cas_cache/utils/utils_rpool.c new file mode 100644 index 000000000..560579c5f --- /dev/null +++ b/modules/cas_cache/utils/utils_rpool.c @@ -0,0 +1,262 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include "ocf/ocf.h" +#include "utils_rpool.h" +#include "ocf_env.h" +#include "../cas_cache.h" + +#define CAS_UTILS_RPOOL_DEBUG 0 +#if 1 == CAS_UTILS_RPOOL_DEBUG +#define CAS_DEBUG_TRACE() \ + printk(KERN_INFO "[Utils][RPOOL] %s\n", __func__) + +#define CAS_DEBUG_MSG(msg) \ + printk(KERN_INFO "[Utils][RPOOL] %s - %s\n", __func__, msg) + +#define CAS_DEBUG_PARAM(format, ...) \ + printk(KERN_INFO "[Utils][RPOOL] %s - "format"\n", \ + __func__, ##__VA_ARGS__) +#else +#define CAS_DEBUG_TRACE() +#define CAS_DEBUG_MSG(msg) +#define CAS_DEBUG_PARAM(format, ...) 
+#endif + +struct _cas_reserve_pool_per_cpu { + spinlock_t lock; + struct list_head list; + atomic_t count; +}; + +struct cas_reserve_pool { + uint32_t limit; + char *name; + uint32_t entry_size; + struct _cas_reserve_pool_per_cpu *rpools; +}; + +struct _cas_rpool_pre_alloc_info { + struct work_struct ws; + struct cas_reserve_pool *rpool_master; + cas_rpool_new rpool_new; + void *allocator_ctx; + struct completion cmpl; + int error; +}; + +#define RPOOL_ITEM_TO_ENTRY(rpool, item) \ + (void *)((unsigned long)item + sizeof(struct list_head) \ + - rpool->entry_size) + +#define RPOOL_ENTRY_TO_ITEM(rpool, entry) \ + (struct list_head *)((unsigned long)entry + rpool->entry_size \ + - sizeof(struct list_head)) + +void _cas_rpool_pre_alloc_do(struct work_struct *ws) +{ + struct _cas_rpool_pre_alloc_info *info = + container_of(ws, struct _cas_rpool_pre_alloc_info, ws); + struct cas_reserve_pool *rpool_master = info->rpool_master; + struct _cas_reserve_pool_per_cpu *current_rpool; + struct list_head *item; + void *entry; + int i, cpu; + + CAS_DEBUG_TRACE(); + + cpu = smp_processor_id(); + current_rpool = &rpool_master->rpools[cpu]; + + for (i = 0; i < rpool_master->limit; i++) { + entry = info->rpool_new(info->allocator_ctx, cpu); + if (!entry) { + info->error = -ENOMEM; + complete(&info->cmpl); + return; + } + item = RPOOL_ENTRY_TO_ITEM(rpool_master, entry); + list_add_tail(item, ¤t_rpool->list); + atomic_inc(¤t_rpool->count); + } + + CAS_DEBUG_PARAM("Added [%d] pre allocated items to reserve poll [%s]" + " for cpu %d", atomic_read(¤t_rpool->count), + rpool_master->name, cpu); + + complete(&info->cmpl); +} + + +int _cas_rpool_pre_alloc_schedule(int cpu, + struct _cas_rpool_pre_alloc_info *info) +{ + init_completion(&info->cmpl); + INIT_WORK(&info->ws, _cas_rpool_pre_alloc_do); + schedule_work_on(cpu, &info->ws); + schedule(); + + wait_for_completion(&info->cmpl); + return info->error; +} + +void cas_rpool_destroy(struct cas_reserve_pool *rpool_master, + cas_rpool_del 
rpool_del, void *allocator_ctx) +{ + int i, cpu_no = num_online_cpus(); + struct _cas_reserve_pool_per_cpu *current_rpool = NULL; + struct list_head *item = NULL, *next = NULL; + void *entry; + + CAS_DEBUG_TRACE(); + + if (!rpool_master) + return; + + if (!rpool_master->rpools) { + kfree(rpool_master); + return; + } + + for (i = 0; i < cpu_no; i++) { + current_rpool = &rpool_master->rpools[i]; + + CAS_DEBUG_PARAM("Destroyed reserve poll [%s] for cpu %d", + rpool_master->name, i); + + if (!atomic_read(¤t_rpool->count)) + continue; + + list_for_each_safe(item, next, ¤t_rpool->list) { + entry = RPOOL_ITEM_TO_ENTRY(rpool_master, item); + list_del(item); + rpool_del(allocator_ctx, entry); + atomic_dec(¤t_rpool->count); + } + + if (atomic_read(¤t_rpool->count)) { + printk(KERN_CRIT "Not all object from reserve poll" + "[%s] deallocated\n", rpool_master->name); + WARN(true, OCF_PREFIX_SHORT" Cleanup problem\n"); + } + } + + kfree(rpool_master->rpools); + kfree(rpool_master); +} + +struct cas_reserve_pool *cas_rpool_create(uint32_t limit, char *name, + uint32_t entry_size, cas_rpool_new rpool_new, + cas_rpool_del rpool_del, void *allocator_ctx) +{ + int i, cpu_no = num_online_cpus(); + struct cas_reserve_pool *rpool_master = NULL; + struct _cas_reserve_pool_per_cpu *current_rpool = NULL; + struct _cas_rpool_pre_alloc_info info; + + CAS_DEBUG_TRACE(); + + memset(&info, 0, sizeof(info)); + + rpool_master = kzalloc(sizeof(*rpool_master), GFP_KERNEL); + if (!rpool_master) + goto error; + + rpool_master->rpools = kzalloc(sizeof(*rpool_master->rpools) * cpu_no, + GFP_KERNEL); + if (!rpool_master->rpools) + goto error; + + rpool_master->limit = limit; + rpool_master->name = name; + rpool_master->entry_size = entry_size; + + info.rpool_master = rpool_master; + info.rpool_new = rpool_new; + info.allocator_ctx = allocator_ctx; + + for (i = 0; i < cpu_no; i++) { + current_rpool = &rpool_master->rpools[i]; + spin_lock_init(¤t_rpool->lock); + INIT_LIST_HEAD(¤t_rpool->list); + + if 
(_cas_rpool_pre_alloc_schedule(i, &info)) + goto error; + + CAS_DEBUG_PARAM("Created reserve poll [%s] for cpu %d", + rpool_master->name, i); + } + + return rpool_master; +error: + + cas_rpool_destroy(rpool_master, rpool_del, allocator_ctx); + return NULL; +} + +#define LIST_FIRST_ITEM(head) head.next + +void *cas_rpool_try_get(struct cas_reserve_pool *rpool_master, int *cpu) +{ + unsigned long flags; + struct _cas_reserve_pool_per_cpu *current_rpool = NULL; + struct list_head *item = NULL; + void *entry = NULL; + + CAS_DEBUG_TRACE(); + + *cpu = smp_processor_id(); + current_rpool = &rpool_master->rpools[*cpu]; + + spin_lock_irqsave(¤t_rpool->lock, flags); + + if (!list_empty(¤t_rpool->list)) { + item = LIST_FIRST_ITEM(current_rpool->list); + entry = RPOOL_ITEM_TO_ENTRY(rpool_master, item); + list_del(item); + atomic_dec(¤t_rpool->count); + } + + spin_unlock_irqrestore(¤t_rpool->lock, flags); + + CAS_DEBUG_PARAM("[%s]Removed item from reserve pool [%s] for cpu [%d], " + "items in pool %d", rpool_master->name, + item == NULL ? "SKIPPED" : "OK", *cpu, + atomic_read(¤t_rpool->count)); + + return entry; +} + +int cas_rpool_try_put(struct cas_reserve_pool *rpool_master, void *entry, int cpu) +{ + int ret = 0; + unsigned long flags; + struct _cas_reserve_pool_per_cpu *current_rpool = NULL; + struct list_head *item; + + CAS_DEBUG_TRACE(); + + current_rpool = &rpool_master->rpools[cpu]; + + spin_lock_irqsave(¤t_rpool->lock, flags); + + if (atomic_read(¤t_rpool->count) >= rpool_master->limit) { + ret = 1; + goto error; + } + + item = RPOOL_ENTRY_TO_ITEM(rpool_master, entry); + list_add_tail(item, ¤t_rpool->list); + + atomic_inc(¤t_rpool->count); + +error: + CAS_DEBUG_PARAM("[%s]Added item to reserve pool [%s] for cpu [%d], " + "items in pool %d", rpool_master->name, + ret == 1 ? 
"SKIPPED" : "OK", cpu, + atomic_read(¤t_rpool->count)); + spin_unlock_irqrestore(¤t_rpool->lock, flags); + return ret; +} diff --git a/modules/cas_cache/utils/utils_rpool.h b/modules/cas_cache/utils/utils_rpool.h new file mode 100644 index 000000000..198cef5a2 --- /dev/null +++ b/modules/cas_cache/utils/utils_rpool.h @@ -0,0 +1,28 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __CAS_RPOOL_H__ +#define __CAS_RPOOL_H__ + +#define CAS_RPOOL_MIN_SIZE_ITEM sizeof(struct list_head) + +struct cas_reserve_pool; + +typedef void (*cas_rpool_del)(void *allocator_ctx, void *item); +typedef void *(*cas_rpool_new)(void *allocator_ctx, int cpu); + +struct cas_reserve_pool *cas_rpool_create(uint32_t limit, char *name, + uint32_t item_size, cas_rpool_new rpool_new, + cas_rpool_del rpool_del, void *allocator_ctx); + +void cas_rpool_destroy(struct cas_reserve_pool *rpool, + cas_rpool_del rpool_del, void *allocator_ctx); + +void *cas_rpool_try_get(struct cas_reserve_pool *rpool, int *cpu); + +int cas_rpool_try_put(struct cas_reserve_pool *rpool, void *item, int cpu); + +#endif /* __CAS_RPOOL_H__ */ + diff --git a/modules/cas_cache/volume/obj_blk.h b/modules/cas_cache/volume/obj_blk.h new file mode 100644 index 000000000..66ecb79de --- /dev/null +++ b/modules/cas_cache/volume/obj_blk.h @@ -0,0 +1,53 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __OBJ_BLK_H__ +#define __OBJ_BLK_H__ + +#include "vol_atomic_dev_bottom.h" +#include "vol_block_dev_bottom.h" +#include "vol_block_dev_top.h" + +struct casdsk_disk; + +struct bd_object { + struct casdsk_disk *dsk; + struct block_device *btm_bd; + /** + * This denotes state of volatile write cache of the device. + * This is set to true when: + * - opening the device + * - when writing to a device without FUA/FLUSH flags + * This is set to false when: + * - FLUSH request is completed on device. 
+ * When it is false + * - FLUSH requests from upper layer are NOT passed to the device. + */ + atomic_t potentially_dirty; + + uint32_t expobj_valid : 1; + /*!< Bit indicates that exported object was created */ + + uint32_t expobj_locked : 1; + /*!< Non zero value indicates data exported object is locked */ + + uint32_t opened_by_bdev : 1; + /*!< Opened by supplying bdev manually */ + + struct atomic_dev_params atomic_params; + + atomic64_t pending_rqs; + /*!< This fields describes in flight IO requests */ + + struct workqueue_struct *workqueue; + /*< Workqueue for internally trigerred I/O */ +}; + +static inline struct bd_object *bd_object(ocf_volume_t vol) +{ + return ocf_volume_get_priv(vol); +} + +#endif /* __OBJ_BLK_H__ */ diff --git a/modules/cas_cache/volume/vol_atomic_dev_bottom.c b/modules/cas_cache/volume/vol_atomic_dev_bottom.c new file mode 100644 index 000000000..2d76d73ee --- /dev/null +++ b/modules/cas_cache/volume/vol_atomic_dev_bottom.c @@ -0,0 +1,1199 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include "cas_cache.h" +#if defined(CAS_NVME_FULL) + +#include +#include + +#define CAS_DEBUG_IO_ATOMIC 0 + +#if 1 == CAS_DEBUG_IO_ATOMIC +#define CAS_DEBUG_TRACE() printk(KERN_DEBUG \ + "[IO][ATOMIC] %s:%d\n", __func__, __LINE__) + +#define CAS_DEBUG_MSG(msg) printk(KERN_DEBUG \ + "[IO][ATOMIC] %s:%d - %s\n", __func__, __LINE__, msg) + +#define CAS_DEBUG_PARAM(format, ...) printk(KERN_DEBUG \ + "[IO][ATOMIC] %s:%d - "format"\n", __func__, \ + __LINE__, ##__VA_ARGS__) +#else +#define CAS_DEBUG_TRACE() +#define CAS_DEBUG_MSG(msg) +#define CAS_DEBUG_PARAM(format, ...) 
+#endif + +#define ADMIN_TIMEOUT (60 * HZ) + +struct cas_atomic_io { + ocf_volume_t volume; + + struct cas_atomic_io *master; + atomic_t req_remaining; + atomic_t potential_dirty; + uint32_t count; + + uint64_t addr; + uint32_t bytes; + uint32_t start; + uint32_t end; + + int error; + unsigned dir:1; + unsigned metadata:1; + unsigned discard:1; + unsigned long flags; + + ocf_end_io_t cmpl_fn; + void *cmpl_context; + + struct blk_data *data; + uint32_t bvec_size; + + struct nvme_command cmd; + struct bio *bio; + struct request *request; + + struct bio_vec_iter iter; +}; + +static struct ocf_mpool *atomic_io_allocator; + +static inline uint32_t cas_atomic_max_io_sectors(void) +{ + /* TODO Take into account max IO size of bottom device */ + return 128 * KiB / (SECTOR_SIZE + OCF_ATOMIC_METADATA_SIZE); +} + +static inline uint32_t cas_atomic_size_of(uint32_t size) +{ + BUG_ON(size % SECTOR_SIZE); + return size + (size / SECTOR_SIZE * OCF_ATOMIC_METADATA_SIZE); +} + +static void cas_atomic_dealloc(struct cas_atomic_io *atomics) +{ + uint32_t i; + + for (i = 0; i < atomics->count; i++) { + struct cas_atomic_io *this = &atomics[i]; + + if (this->request && !IS_ERR(this->request)) { + blk_mq_free_request(this->request); + this->request = NULL; + } + + if (this->bio) + bio_put(this->bio); + + if (this->data) { + cas_ctx_data_secure_erase(this->data); + cas_ctx_data_free(this->data); + } + } + + ocf_mpool_del(atomic_io_allocator, atomics, atomics->count); +} + +static struct cas_atomic_io *cas_atomic_alloc(int dir, struct ocf_io *io, bool write_zero) +{ + /* Get max size of IO */ + const uint32_t max_io_size = cas_atomic_max_io_sectors() + * SECTOR_SIZE; + + /* Get number of IOs to be issued */ + uint32_t ios_count; + ocf_cache_t cache = ocf_volume_get_cache(io->volume); + + uint64_t addr = io->addr; + uint32_t i, bytes = io->bytes; + uint32_t increase_sectors_start = 0, increase_sectors_end = 0; + struct cas_atomic_io *atoms; + + if (dir == OCF_WRITE && !write_zero) { + /* 
TODO: this logic is probably no longer required */ + BUG_ON(!cache); + increase_sectors_start = + ocf_metadata_check_invalid_before(cache, addr); + + increase_sectors_end = + ocf_metadata_check_invalid_after(cache, addr, + io->bytes); + increase_sectors_start *= 512; + increase_sectors_end *= 512; + + if (increase_sectors_start) { + bytes += increase_sectors_start; + addr -= increase_sectors_start; + } + + if (increase_sectors_end) + bytes += increase_sectors_end; + } + + /* Get number of IOs to be issued */ + ios_count = DIV_ROUND_UP(bytes, max_io_size); + + atoms = ocf_mpool_new(atomic_io_allocator, + ios_count); + if (!atoms) + return NULL; + + CAS_DEBUG_PARAM("Addr = %llu, bytes = %u", io->addr, io->bytes); + + /* setup master IO */ + atomic_set(&atoms->req_remaining, ios_count); + + atoms->count = ios_count; + atoms->cmpl_fn = io->end; + atoms->cmpl_context = io; + + for (i = 0; i < ios_count; i++) { + struct cas_atomic_io *this = &atoms[i]; + + this->master = atoms; + this->addr = addr; + this->bytes = min(bytes, max_io_size); + this->dir = dir; + this->flags = io->flags; + this->volume = io->volume; + + CAS_DEBUG_PARAM("Sub-atomic IO (%u), Addr = %llu, bytes = %u", + i, this->addr, this->bytes); + + addr += this->bytes; + bytes -= this->bytes; + + /* Allocate BIO data vector with pages */ + this->bvec_size = cas_atomic_size_of(this->bytes); + this->bvec_size = DIV_ROUND_UP(this->bvec_size, PAGE_SIZE); + + if (write_zero || increase_sectors_start || + increase_sectors_end) + this->data = cas_ctx_data_zalloc(this->bvec_size); + else + this->data = cas_ctx_data_alloc(this->bvec_size); + + if (!this->data) + goto cas_atomic_alloc_ERROR; + + /* Set length of last page */ + this->data->vec[this->bvec_size - 1].bv_len = + cas_atomic_size_of(this->bytes) % PAGE_SIZE; + + CAS_DEBUG_PARAM("Sub-atomic IO (%u), BIO vector size = %u, " + "alignment %u", i, this->bvec_size, + this->data->vec[this->bvec_size - 1].bv_len); + + this->start = min(this->bytes, 
increase_sectors_start); + increase_sectors_start -= this->start; + } + BUG_ON(bytes); + + for (i = ios_count; i && increase_sectors_end; i--) { + struct cas_atomic_io *this = &atoms[i - 1]; + + this->end = min(this->bytes, increase_sectors_end); + increase_sectors_end -= this->end; + } + + return atoms; + +cas_atomic_alloc_ERROR: + + if (atoms) + cas_atomic_dealloc(atoms); + + return NULL; +} + +static int cas_atomic_rd_complete(struct cas_atomic_io *atom) +{ + struct bio_vec_iter *dst, src; + uint32_t copied; + const uint32_t size = OCF_ATOMIC_METADATA_SIZE; + + uint32_t bytes = atom->bytes; + + CAS_DEBUG_TRACE(); + + /* Initialize iterators */ + cas_io_iter_init(&src, atom->data->vec, atom->bvec_size); + dst = &atom->iter; + + BUG_ON(bytes % SECTOR_SIZE); + BUG_ON(size != OCF_ATOMIC_METADATA_SIZE); + + copied = 0; + while (bytes) { + /* Copy data */ + copied += cas_io_iter_cpy(dst, &src, SECTOR_SIZE); + + /* Omit metadata */ + copied += cas_io_iter_move(&src, size); + + bytes -= SECTOR_SIZE; + } + + /* Validate if copied proper numbers of bytes */ + if (copied != cas_atomic_size_of(atom->bytes)) { + CAS_DEBUG_PARAM("ERROR, copied %u, expected = %u", + copied, cas_atomic_size_of(atom->bytes)); + /* Metadata and data coping problem */ + return -EIO; + } + + return 0; +} + +static int cas_atomic_rd_metadata_complete(struct cas_atomic_io *atom) +{ + struct bio_vec_iter *dst, src; + uint32_t copied; + const uint32_t size = OCF_ATOMIC_METADATA_SIZE; + + uint32_t bytes = atom->bytes; + + CAS_DEBUG_TRACE(); + + /* Initialize iterators */ + cas_io_iter_init(&src, atom->data->vec, atom->bvec_size); + dst = &atom->iter; + + BUG_ON(bytes % SECTOR_SIZE); + BUG_ON(size != OCF_ATOMIC_METADATA_SIZE); + + copied = 0; + while (bytes) { + /* Copy data */ + copied += cas_io_iter_move(&src, SECTOR_SIZE); + + /* Omit metadata */ + copied += cas_io_iter_cpy(dst, &src, size); + + bytes -= SECTOR_SIZE; + } + + /* Validate if copied proper numbers of bytes */ + if (copied != 
cas_atomic_size_of(atom->bytes)) { + CAS_DEBUG_PARAM("ERROR, copied %u, expected = %u", + copied, cas_atomic_size_of(atom->bytes)); + /* Metadata and data coping problem */ + return -EIO; + } + + return 0; +} + +static int cas_atomic_rd_prepare(struct ocf_io *io, + struct cas_atomic_io *atom) +{ + struct blkio *blkio = cas_io_to_blkio(io); + uint32_t moved; + + /* Store BIO vector iterator, when read completed then it will be + * known were place data + */ + cas_io_iter_copy_set(&atom->iter, &blkio->iter); + + /* Move iterator for next IO */ + moved = cas_io_iter_move(&blkio->iter, atom->bytes); + + /* Validate if there is enough space in BIO data vector to do read */ + if (moved != atom->bytes) { + CAS_DEBUG_PARAM("ERROR, moved %u, expected = %u", + moved, cas_atomic_size_of(atom->bytes)); + return -EINVAL; + } + + return 0; +} + +static int cas_atomic_wr_prepare(struct ocf_io *io, + struct cas_atomic_io *atom) +{ + struct blkio *blkio = cas_io_to_blkio(io); + ocf_cache_t cache; + struct ocf_atomic_metadata metadata; + struct bio_vec_iter dst, src; + uint32_t copied, added; + + uint64_t addr = atom->addr; + uint32_t bytes = atom->bytes; + + cache = ocf_volume_get_cache(io->volume); + + /* Initialize iterators */ + cas_io_iter_init(&dst, atom->data->vec, atom->bvec_size); + cas_io_iter_copy_set(&src, &blkio->iter); + + BUG_ON(!cache); + BUG_ON(bytes % SECTOR_SIZE); + + copied = 0; + if (atom->start) { + added = cas_atomic_size_of(atom->start); + cas_io_iter_move(&dst, added); + + bytes -= atom->start; + copied = added; + + addr += atom->start; + } + + if (atom->end) { + added = cas_atomic_size_of(atom->end); + bytes -= atom->end; + copied += added; + } + + BUG_ON(sizeof(metadata) != OCF_ATOMIC_METADATA_SIZE); + + while (bytes) { + /* Get metadata */ + if (ocf_metadata_get_atomic_entry(cache, addr, &metadata)) + break; + + /* Copy data */ + copied += cas_io_iter_cpy(&dst, &src, SECTOR_SIZE); + + /* Copy metadata */ + copied += cas_io_iter_cpy_from_data(&dst, 
&metadata, + sizeof(metadata)); + + bytes -= SECTOR_SIZE; + addr += SECTOR_SIZE; + } + + cas_io_iter_move(&blkio->iter, atom->bytes - (atom->start + atom->end)); + + /* Validate if copied proper numbers of bytes */ + if (copied != cas_atomic_size_of(atom->bytes)) { + CAS_DEBUG_PARAM("ERROR, copied %u, expected = %u", + copied, cas_atomic_size_of(atom->bytes)); + /* Metadata and data coping problem */ + return -EINVAL; + } + + return 0; +} + +static int cas_atomic_rd_metadata_prepare(struct ocf_io *io, + struct cas_atomic_io *atom) +{ + struct blkio *blkio = cas_io_to_blkio(io); + uint32_t moved; + + BUG_ON(io->dir != OCF_READ); + + atom->metadata = true; + + /* Store BIO vector iterator, when read completed then it will be + * known were place data + */ + cas_io_iter_copy_set(&atom->iter, &blkio->iter); + + /* Move iterator for next IO */ + moved = cas_io_iter_move(&blkio->iter, (atom->bytes / SECTOR_SIZE) * + OCF_ATOMIC_METADATA_SIZE); + + /* Validate if copied proper numbers of bytes */ + if (moved != (atom->bytes / SECTOR_SIZE) * + OCF_ATOMIC_METADATA_SIZE) { + CAS_DEBUG_PARAM("ERROR, copied %u, expected = %u", + moved, cas_atomic_size_of(atom->bytes)); + /* Metadata and data coping problem */ + return -EIO; + } + + return 0; +} + +static void cas_atomic_end_atom(struct cas_atomic_io *atom, int error) +{ + struct cas_atomic_io *master = atom->master; + struct ocf_io *io = master->cmpl_context; + + if (error) + master->error |= error; + + if (atomic_dec_return(&master->req_remaining)) + return; + + CAS_DEBUG_MSG("Completion"); + + /* Send completion to caller */ + master->cmpl_fn(io, master->error); + + /* Free allocated structures */ + cas_atomic_dealloc(master); + ocf_io_put(io); +} + +static DECLARE_BLOCK_CALLBACK(cas_atomic_fire_atom, struct bio *bio, + unsigned int bytes, int error) +{ + int err; + struct cas_atomic_io *atom; + struct bd_object *bdobj; + + BUG_ON(!bio); + BUG_ON(!bio->bi_private); + err = BLOCK_CALLBACK_ERROR(bio, error); + atom = 
bio->bi_private; + BUG_ON(!atom->master); + bdobj = bd_object(atom->volume); + + CAS_DEBUG_PARAM("BIO result = %d", BLOCK_CALLBACK_ERROR(bio, error)); + + if (err != 0) + goto out; + + if (atom->discard) + goto out; + + if (atom->metadata) { + if (cas_atomic_rd_metadata_complete(atom)) + atom->master->error = -EIO; + goto out; + } + + switch (atom->dir) { + case OCF_READ: + if (cas_atomic_rd_complete(atom)) + atom->master->error = -EIO; + break; + case OCF_WRITE: + if (!cas_blk_is_flush_io(atom->flags)) { + atomic_inc(&bdobj->potentially_dirty); + } else { + /* IO flush finished, update potential + * dirty state + */ + atomic_sub(atomic_read(&atom->potential_dirty), + &bdobj->potentially_dirty); + } + break; + } + +out: + /* Free BIO, no needed any more */ + BUG_ON(bio != atom->bio); + bio_put(bio); + atom->bio = NULL; + + cas_atomic_end_atom(atom, err); +} + +static void _cas_atomic_setup_cmd( + ocf_volume_t volume, + struct request *req, + struct bio* bio, + uint64_t bytes, + int dir, + void *end_io_data, + struct nvme_command *cmd) +{ + struct bd_object *bdobj = bd_object(volume); + unsigned int ns_id = bdobj->atomic_params.nsid; + unsigned long *cmd_addr = blk_mq_rq_to_pdu(req); + + cmd->rw.opcode = (dir == OCF_WRITE) ? 
nvme_cmd_write : nvme_cmd_read; + cmd->rw.nsid = cpu_to_le32(ns_id); + cmd->rw.slba = cpu_to_le64(BIO_BISECTOR(bio)); + cmd->rw.length = cpu_to_le16((bytes / SECTOR_SIZE) - 1); + cmd->rw.control = cpu_to_le16(NVME_RW_LR); + + req->cmd_type = REQ_TYPE_DRV_PRIV; + req->cmd_flags |= REQ_FAILFAST_DRIVER; + + *cmd_addr = (unsigned long)cmd; + + req->timeout = ADMIN_TIMEOUT; /* TODO Use timeout for regular IO */ + + req->cmd = (unsigned char *) cmd; + req->cmd_len = sizeof(*cmd); + req->special = NULL; + req->end_io_data = end_io_data; +} + +static void cas_atomic_setup_cmd(int dir, struct cas_atomic_io *atom) +{ + _cas_atomic_setup_cmd(atom->volume, atom->request, atom->bio, + atom->bytes, dir, atom, &atom->cmd); +} + +static void cas_atomic_end_request(struct request *request, int error) +{ + struct cas_atomic_io *atom; + + BUG_ON(!request); + atom = request->end_io_data; + + /* Free request not needed any more */ + BUG_ON(atom->request != request); + blk_mq_free_request(request); + atom->request = NULL; + + CAS_DEBUG_PARAM("RQ result = %d", error); + + cas_atomic_end_atom(atom, error); +} + +static void cas_atomic_fire_atom(int dir, struct ocf_io *io, + struct cas_atomic_io *atom) +{ + struct bd_object *bdobj = bd_object(atom->volume); + struct block_device *bdev = bdobj->btm_bd; + struct request_queue *queue = bdev_get_queue(bdev); + + struct bio *bio; + struct bio_vec *bvec; + uint32_t i; + + /* Allocate BIO */ + bio = atom->bio = bio_alloc(GFP_NOIO, atom->bvec_size); + if (!bio) + goto _fire_atom_ERROR; + + /* Setup BIO */ + bio->bi_bdev = bdev; + BIO_BISECTOR(bio) = atom->addr / SECTOR_SIZE; + bio->bi_next = NULL; + bio->bi_private = atom; + BIO_OP_FLAGS(bio) |= io->flags; + bio->bi_end_io = REFER_BLOCK_CALLBACK(cas_atomic_fire_atom); + + /* Add pages to the BIO */ + bvec = atom->data->vec; + for (i = 0; i < atom->bvec_size; i++, bvec++) { + int added = bio_add_pc_page(queue, bio, + bvec->bv_page, bvec->bv_len, bvec->bv_offset); + + if (added != bvec->bv_len) { + 
/* Oops, a problem, cannot add page to the BIO */ + goto _fire_atom_ERROR; + } + } + + /* Allocate request */ + atom->request = cas_blk_make_request(queue, atom->bio, GFP_NOIO); + if (IS_ERR(atom->request)) { + atom->request = NULL; + goto _fire_atom_ERROR; + } + + /* Setup command */ + cas_atomic_setup_cmd(dir, atom); + + /* Additional completion for request */ + atomic_inc(&atom->master->req_remaining); + + /* Send requests (NVME atomic command) */ + blk_execute_rq_nowait(queue, NULL, atom->request, 0, + cas_atomic_end_request); + + return; + +_fire_atom_ERROR: + CAS_DEBUG_MSG("ERROR"); + cas_atomic_end_atom(atom, -EIO); +} + +static void cas_atomic_fire_atoms(int dir, struct ocf_io *io, + struct cas_atomic_io *atoms) +{ + uint32_t i; + + ocf_io_get(io); + + for (i = 0; i < atoms->count; i++) { + struct cas_atomic_io *this = &atoms[i]; + + CAS_DEBUG_PARAM("Fire(%u), Addr = %llu, bytes = %u", + i, this->addr, this->bytes); + cas_atomic_fire_atom(dir, io, this); + } +} + +typedef int (*cas_prepare_atom_pfn_t)(struct ocf_io *io, + struct cas_atomic_io *atoms); + +static int cas_atomic_prepare_atoms(struct ocf_io *io, + cas_prepare_atom_pfn_t prepare, + struct cas_atomic_io *atoms) +{ + int i; + int result = 0; + + if (!prepare) + return 0; + + for (i = 0; i < atoms->count; i++) { + struct cas_atomic_io *this = &atoms[i]; + + CAS_DEBUG_PARAM("Sub-atomic IO preparation(%u), Addr = %llu, " + "bytes = %u, dir = %d", i, this->addr, + this->bytes, dir); + + result |= prepare(io, this); + } + + return result; +} + +static void cas_atomic_fire_io(struct ocf_io *io, + cas_prepare_atom_pfn_t prepare, + bool write_zero) +{ + int dir = io->dir; + + /* Create atomic IOs context, mainly allocations */ + struct cas_atomic_io *atoms = cas_atomic_alloc(dir, io, write_zero); + + if (!atoms) { + CAS_DEBUG_MSG("Memory allocation ERROR"); + goto _submit_io_ERROR; + } + + /* Prepare IOs, mainly coping data */ + if (cas_atomic_prepare_atoms(io, prepare, atoms)) { + 
CAS_DEBUG_MSG("Preparation ERROR"); + goto _submit_io_ERROR; + } + + /* Send IO */ + atomic_inc(&atoms->req_remaining); + cas_atomic_fire_atoms(dir, io, atoms); + cas_atomic_end_atom(atoms, 0); + + return; + +_submit_io_ERROR: + if (atoms) + cas_atomic_dealloc(atoms); + + io->end(io, -EIO); +} + +static void cas_atomic_submit_flush_bio(struct cas_atomic_io *atom) +{ + struct request *req = atom->request; + struct bd_object *bdobj = bd_object(atom->volume); + unsigned int ns_id = bdobj->atomic_params.nsid; + struct nvme_command *cmd = &atom->cmd; + unsigned long *cmd_addr = blk_mq_rq_to_pdu(req); + + cmd->rw.opcode = nvme_cmd_flush; + cmd->rw.nsid = cpu_to_le32(ns_id); + + *cmd_addr = (unsigned long)cmd; + + req->cmd_type = REQ_TYPE_DRV_PRIV; + + req->timeout = ADMIN_TIMEOUT; + + req->cmd = (unsigned char *) cmd; + req->cmd_len = sizeof(*cmd); + req->special = NULL; + req->end_io_data = atom; + + /* Additional completion for request */ + atomic_inc(&atom->master->req_remaining); + + /* Send NVMe flush command */ + blk_execute_rq_nowait(req->q, NULL, req, 0, cas_atomic_end_request); +} + +static int cas_atomic_submit_discard_bio(struct cas_atomic_io *atom) +{ + struct request *req = atom->request; + struct nvme_command *cmd = &atom->cmd; + struct bd_object *bdobj = bd_object(atom->volume); + unsigned int ns_id = bdobj->atomic_params.nsid; + struct nvme_dsm_range *nvm_discard; + struct page *page; + int offset; + unsigned long *cmd_addr = blk_mq_rq_to_pdu(req); + + nvm_discard = kmalloc(sizeof(*nvm_discard), GFP_ATOMIC); + if (!nvm_discard) { + return -ENOMEM; + } + + nvm_discard->cattr = cpu_to_le32(0); + nvm_discard->nlb = cpu_to_le32(BIO_BISIZE(atom->bio) >> SECTOR_SHIFT); + nvm_discard->slba = cpu_to_le64(BIO_BISECTOR(atom->bio)); + + cmd->dsm.opcode = nvme_cmd_dsm; + cmd->dsm.nsid = cpu_to_le32(ns_id); + cmd->dsm.nr = 0; + cmd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); + + req->completion_data = nvm_discard; + page = virt_to_page(nvm_discard); + offset = 
offset_in_page(nvm_discard); + blk_add_request_payload(req, page, offset, sizeof(*nvm_discard)); + + req->__sector = BIO_BISECTOR(atom->bio); + req->__data_len = BIO_BISIZE(atom->bio); + req->ioprio = bio_prio(atom->bio); + + req->timeout = ADMIN_TIMEOUT; + req->end_io_data = atom; + req->cmd_type = REQ_TYPE_DRV_PRIV; + req->cmd_flags = CAS_BIO_DISCARD; + + req->errors = 0; + + *cmd_addr = (unsigned long)cmd; + + /* Additional completion for request */ + atomic_inc(&atom->master->req_remaining); + + /* Send NVMe flush command */ + blk_execute_rq_nowait(req->q, NULL, req, 0, cas_atomic_end_request); + + return 0; +} + +static int cas_atomic_special_req_prepare(struct cas_atomic_io *atom, + struct ocf_io *io) +{ + struct bd_object *bdobj = bd_object(io->volume); + struct block_device *bdev = bdobj->btm_bd; + + CAS_DEBUG_TRACE(); + atom->master = atom; + atom->count = 1; + atom->cmpl_fn = io->end; + atom->cmpl_context = io; + atom->volume = io->volume; + atom->flags = io->flags; + atomic_set(&atom->req_remaining, 1); + + atom->bio = bio_alloc(GFP_NOIO, 1); + if (!atom->bio) { + CAS_PRINT_RL(KERN_ERR "Couldn't allocate memory for bio\n"); + return -ENOMEM; + } + + atom->bio->bi_end_io = REFER_BLOCK_CALLBACK(cas_atomic_fire_atom); + atom->bio->bi_bdev = bdev; + atom->bio->bi_private = atom; + + return 0; +} + +void cas_atomic_submit_discard(struct ocf_io *io) +{ + struct bd_object *bdobj = bd_object(io->volume); + struct block_device *bdev = bdobj->btm_bd; + struct request_queue *q = bdev_get_queue(bdev); + int result = 0; + + struct cas_atomic_io *atom = NULL; + struct blkio *blkio = cas_io_to_blkio(io); + + CAS_DEBUG_TRACE(); + + if (!q) { + /* No queue, error */ + io->end(io, -EINVAL); + return; + } + + /* Allocate and setup control structure. 
*/ + atom = ocf_mpool_new(atomic_io_allocator, 1); + if (!atom) { + CAS_PRINT_RL(KERN_ERR "Couldn't allocate memory for IO ctrl\n"); + io->end(io, -ENOMEM); + return; + } + + result = cas_atomic_special_req_prepare(atom, io); + if (result) { + blkio->error = result; + goto out; + } + + /* Increase IO reference counter for FLUSH IO */ + ocf_io_get(io); + + /* Set up specific field */ + atom->discard = true; + BIO_OP_FLAGS(atom->bio) = CAS_BIO_DISCARD; + BIO_BISECTOR(atom->bio) = io->addr / SECTOR_SIZE; + BIO_BISIZE(atom->bio) = io->bytes; + + atom->request = cas_blk_make_request(q, atom->bio, GFP_NOIO); + if (IS_ERR(atom->request)) { + blkio->error = PTR_ERR(atom->request); + goto out; + } + + atomic_inc(&atom->req_remaining); + result = cas_atomic_submit_discard_bio(atom); + if (result) + blkio->error = result; + +out: + cas_atomic_end_atom(atom, blkio->error); +} + +void cas_atomic_submit_flush(struct ocf_io *io) +{ +#ifdef CAS_FLUSH_SUPPORTED + struct bd_object *bdobj = bd_object(io->volume); + struct block_device *bdev = bdobj->btm_bd; + struct request_queue *q = bdev_get_queue(bdev); + int result = 0; + struct cas_atomic_io *atom = NULL; + struct blkio *blkio = cas_io_to_blkio(io); + + CAS_DEBUG_TRACE(); + + blkio->dirty = atomic_read(&bdobj->potentially_dirty); + + if (!blkio->dirty) { + /* Didn't write anything to underlying disk; + * no need to send req_flush + */ + io->end(io, 0); + return; + } + + if (!q) { + io->end(io, -EINVAL); + return; + } + + if (!CHECK_QUEUE_FLUSH(q)) { + /* This block device does not support flush */ + atomic_sub(blkio->dirty, &bdobj->potentially_dirty); + io->end(io, 0); + return; + } + + /* Allocate and setup control structure. 
*/ + atom = ocf_mpool_new(atomic_io_allocator, 1); + if (!atom) { + CAS_PRINT_RL(KERN_ERR "Couldn't allocate memory for IO ctrl\n"); + io->end(io, -ENOMEM); + return; + } + + /* Increase IO reference counter for FLUSH IO */ + ocf_io_get(io); + + result = cas_atomic_special_req_prepare(atom, io); + if (result) { + CAS_PRINT_RL(CAS_KERN_ERR "Couldn't allocate memory for BIO\n"); + blkio->error = -ENOMEM; + goto out; + } + + /* Set up specific field */ + atom->dir = OCF_WRITE; + atomic_set(&atom->potential_dirty, blkio->dirty); + + atom->request = cas_blk_make_request(q, atom->bio, GFP_NOIO); + if (IS_ERR(atom->request)) { + blkio->error = PTR_ERR(atom->request); + goto out; + } + + atomic_inc(&atom->req_remaining); + cas_atomic_submit_flush_bio(atom); + +out: + cas_atomic_end_atom(atom, blkio->error); +#else + /* Running operating system without support for REQ_FLUSH + * (i.e. SLES 11 SP 1) CAS cannot use flushing requests to handle + * power-fail safe Write-Back + */ + struct blkio *bdio = cas_io_to_blkio(io); + + io->end(io, -EINVAL); + /* on SLES 11 SP 1 powerfail safety can only be achieved through + * disabling volatile write cache of disk itself. + */ +#endif +} + +void cas_atomic_submit_io(struct ocf_io *io) +{ + CAS_DEBUG_TRACE(); + + if (!CAS_IS_WRITE_FLUSH_FUA(io->flags) && + CAS_IS_WRITE_FLUSH(io->flags)) { + /* FLUSH */ + cas_atomic_submit_flush(io); + return; + } + + if (unlikely(!io->bytes)) { + CAS_PRINT_RL(KERN_ERR "Zero length request\n"); + io->end(io, -EINVAL); + return; + } + + cas_atomic_fire_io(io, io->dir == OCF_READ ? 
cas_atomic_rd_prepare : + cas_atomic_wr_prepare, false); +} + +void cas_atomic_submit_metadata(struct ocf_io *io) +{ + BUG_ON(io->dir != OCF_READ); + + CAS_DEBUG_TRACE(); + + if (unlikely(!io->bytes)) { + CAS_PRINT_RL(CAS_KERN_ERR "Zero length request\n"); + io->end(io, -EINVAL); + return; + } + + cas_atomic_fire_io(io, cas_atomic_rd_metadata_prepare, false); +} + +unsigned int cas_atomic_get_max_io_size(ocf_volume_t volume) +{ + struct block_device *bd; + + if (!volume) + return 0; + + bd = bd_object(volume)->btm_bd; + if (!bd->bd_disk) + return 0; + + return queue_max_sectors(bd->bd_disk->queue); +} + +void cas_atomic_close_object(ocf_volume_t volume) +{ + struct bd_object *bdobj = bd_object(volume); + + if(bdobj->workqueue) + destroy_workqueue(bdobj->workqueue); + + block_dev_close_object(volume); +} + +int cas_atomic_open_object(ocf_volume_t volume) +{ + int result; + uint8_t type; + struct bd_object *bdobj = NULL; + + result = block_dev_open_object(volume); + if (result) + return result; + + bdobj = bd_object(volume); + + result = cas_blk_identify_type_by_bdev(bdobj->btm_bd, + &type, &bdobj->atomic_params); + + if (type != ATOMIC_DEVICE_VOLUME) { + cas_atomic_close_object(volume); + result = -OCF_ERR_INVAL_VOLUME_TYPE; + goto end; + } + + bdobj->workqueue = create_workqueue("CAS_AT_ZER"); + if (!bdobj->workqueue) { + cas_atomic_close_object(volume); + result = -ENOMEM; + goto end; + } + +end: + return result; +} + +uint64_t cas_atomic_get_length(ocf_volume_t volume) +{ + struct bd_object *bdobj = bd_object(volume); + + return bdobj->atomic_params.size; +} + +/* context to keep track of write_zero progress across child IOs */ +struct cas_atomic_write_zero_ctx +{ + struct ocf_io *sub_io; + struct ocf_io *original_io; + struct work_struct cmpl_work; + unsigned step_size; +}; + +static void _cas_atomic_write_zeroes_end(struct cas_atomic_write_zero_ctx *ctx, + int error) +{ + struct ocf_io *io = ctx->original_io; + + /* end master io */ + io->end(io, error); + 
ocf_io_put(io); + + /* cleanup context */ + ocf_io_put(ctx->sub_io); + kfree(ctx); +} + +/* atomic write zerores I/O completion */ +static void _cas_atomic_write_zeroes_step_cmpl(struct ocf_io *io, int error) +{ + struct cas_atomic_write_zero_ctx *ctx = io->priv1; + struct bd_object *bdobj = bd_object(io->volume); + const unsigned bytes_processed = (io->addr - ctx->original_io->addr) + + io->bytes; + const unsigned bytes_left = ctx->original_io->bytes - bytes_processed; + + BUG_ON(io->bytes > ctx->step_size); + + /* update I/O address and size */ + io->addr += io->bytes; + io->bytes = min(bytes_left, ctx->step_size); + + if (!bytes_left || error) { + _cas_atomic_write_zeroes_end(ctx, error); + } else { + /* submit next IO from work context */ + queue_work(bdobj->workqueue, &ctx->cmpl_work); + } +} + +/* work routine to schedule next portion of write zero I/O */ +void _cas_atomic_write_zeroes_work(struct work_struct *work) +{ + struct cas_atomic_write_zero_ctx *ctx = container_of(work, + struct cas_atomic_write_zero_ctx, cmpl_work); + + cas_atomic_fire_io(ctx->sub_io, NULL, true); +} + +void cas_atomic_submit_write_zeroes(struct ocf_io *io) +{ + /* send 8 atoms in each I/O */ + const unsigned step_size = min(cas_atomic_max_io_sectors() + * SECTOR_SIZE * 8, io->bytes); + struct cas_atomic_write_zero_ctx *ctx = NULL; + int result = 0; + + if (unlikely(!io->bytes)) { + CAS_PRINT_RL(CAS_KERN_ERR "Zero length request\n"); + result = -EINVAL; + goto error; + } + + ctx = kmalloc(sizeof(*ctx), GFP_NOIO); + if (!ctx) { + result = -ENOMEM; + goto error; + } + + ctx->sub_io = ocf_volume_new_io(io->volume); + if (!ctx->sub_io) { + result = -ENOMEM; + goto error_after_ctx; + } + + /* set up context */ + ctx->step_size = step_size; + ctx->original_io = io; + INIT_WORK(&ctx->cmpl_work, _cas_atomic_write_zeroes_work); + + /* get reference to original io */ + ocf_io_get(io); + + /* set up sub-io */ + ocf_io_configure(ctx->sub_io, io->addr, + min(io->bytes, ctx->step_size), + 
OCF_WRITE, 0, 0); + ocf_io_set_cmpl(ctx->sub_io, ctx, NULL, _cas_atomic_write_zeroes_step_cmpl); + + cas_atomic_fire_io(ctx->sub_io, NULL, true); + + return; + +error_after_ctx: + kfree(ctx); +error: + io->end(io, result); +} + +const struct ocf_volume_properties cas_object_atomic_properties = { + .name = "Atomic Writes NVMe", + .io_priv_size = sizeof(struct blkio), + .volume_priv_size = sizeof(struct bd_object), + .caps = { + .atomic_writes = 1, + }, + .ops = { + .submit_io = cas_atomic_submit_io, + .submit_flush = cas_atomic_submit_flush, + .submit_discard = cas_atomic_submit_discard, + .submit_metadata = cas_atomic_submit_metadata, + .submit_write_zeroes = cas_atomic_submit_write_zeroes, + .open = cas_atomic_open_object, + .close = block_dev_close_object, + .get_max_io_size = cas_atomic_get_max_io_size, + .get_length = cas_atomic_get_length, + }, + .io_ops = { + .set_data = cas_blk_io_set_data, + .get_data = cas_blk_io_get_data, + }, +}; + +int atomic_dev_init(void) +{ + int ret; + + ret = ocf_ctx_register_volume_type(cas_ctx, ATOMIC_DEVICE_VOLUME, + &cas_object_atomic_properties); + + if (ret < 0) + return -EINVAL; + + atomic_io_allocator = ocf_mpool_create(NULL, 0, + sizeof(struct cas_atomic_io), GFP_NOIO, 1, "cas_atomic_io"); + + if (!atomic_io_allocator) { + ocf_ctx_unregister_volume_type(cas_ctx, ATOMIC_DEVICE_VOLUME); + return -ENOMEM; + } + + return 0; +} + +void atomic_dev_deinit(void) +{ + if (atomic_io_allocator) { + ocf_mpool_destroy(atomic_io_allocator); + atomic_io_allocator = NULL; + } + + ocf_ctx_unregister_volume_type(cas_ctx, ATOMIC_DEVICE_VOLUME); +} + +#else + +int atomic_dev_init(void) +{ + return 0; +} + +void atomic_dev_deinit(void) +{ +} + +#endif diff --git a/modules/cas_cache/volume/vol_atomic_dev_bottom.c.orig b/modules/cas_cache/volume/vol_atomic_dev_bottom.c.orig new file mode 100644 index 000000000..2e3a81e21 --- /dev/null +++ b/modules/cas_cache/volume/vol_atomic_dev_bottom.c.orig @@ -0,0 +1,1217 @@ +/* +* Copyright(c) 2012-2019 
Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include "cas_cache.h" +#if defined(CAS_NVME_FULL) + +#include +#include + +#define CAS_DEBUG_IO_ATOMIC 0 + +#if 1 == CAS_DEBUG_IO_ATOMIC +#define CAS_DEBUG_TRACE() printk(KERN_DEBUG \ + "[IO][ATOMIC] %s:%d\n", __func__, __LINE__) + +#define CAS_DEBUG_MSG(msg) printk(KERN_DEBUG \ + "[IO][ATOMIC] %s:%d - %s\n", __func__, __LINE__, msg) + +#define CAS_DEBUG_PARAM(format, ...) printk(KERN_DEBUG \ + "[IO][ATOMIC] %s:%d - "format"\n", __func__, \ + __LINE__, ##__VA_ARGS__) +#else +#define CAS_DEBUG_TRACE() +#define CAS_DEBUG_MSG(msg) +#define CAS_DEBUG_PARAM(format, ...) +#endif + +#define ADMIN_TIMEOUT (60 * HZ) + +struct cas_atomic_io { + ocf_volume_t volume; + + struct cas_atomic_io *master; + atomic_t req_remaining; + atomic_t potential_dirty; + uint32_t count; + + uint64_t addr; + uint32_t bytes; + uint32_t start; + uint32_t end; + + int error; + unsigned dir:1; + unsigned metadata:1; + unsigned discard:1; + unsigned long flags; + + ocf_end_io_t cmpl_fn; + void *cmpl_context; + + struct blk_data *data; + uint32_t bvec_size; + + struct nvme_command cmd; + struct bio *bio; + struct request *request; + + struct bio_vec_iter iter; +}; + +static struct ocf_mpool *atomic_io_allocator; + +static inline uint32_t cas_atomic_max_io_sectors(void) +{ + /* TODO Take into account max IO size of bottom device */ + return 128 * KiB / (SECTOR_SIZE + OCF_ATOMIC_METADATA_SIZE); +} + +static inline uint32_t cas_atomic_size_of(uint32_t size) +{ + BUG_ON(size % SECTOR_SIZE); + return size + (size / SECTOR_SIZE * OCF_ATOMIC_METADATA_SIZE); +} + +static void cas_atomic_dealloc(struct cas_atomic_io *atomics) +{ + uint32_t i; + + for (i = 0; i < atomics->count; i++) { + struct cas_atomic_io *this = &atomics[i]; + + if (this->request && !IS_ERR(this->request)) { + blk_mq_free_request(this->request); + this->request = NULL; + } + + if (this->bio) + bio_put(this->bio); + + if (this->data) { + 
cas_ctx_data_secure_erase(this->data); + cas_ctx_data_free(this->data); + } + } + + ocf_mpool_del(atomic_io_allocator, atomics, atomics->count); +} + +static struct cas_atomic_io *cas_atomic_alloc(int dir, struct ocf_io *io, bool write_zero) +{ + /* Get max size of IO */ + const uint32_t max_io_size = cas_atomic_max_io_sectors() + * SECTOR_SIZE; + + /* Get number of IOs to be issued */ + uint32_t ios_count; + ocf_cache_t cache = ocf_volume_get_cache(io->volume); + + uint64_t addr = io->addr; + uint32_t i, bytes = io->bytes; + uint32_t increase_sectors_start = 0, increase_sectors_end = 0; + struct cas_atomic_io *atoms; + + if (dir == OCF_WRITE && !write_zero) { + /* TODO: this logic is probably no longer required */ + BUG_ON(!cache); + increase_sectors_start = + ocf_metadata_check_invalid_before(cache, addr); + + increase_sectors_end = + ocf_metadata_check_invalid_after(cache, addr, + io->bytes); + increase_sectors_start *= 512; + increase_sectors_end *= 512; + + if (increase_sectors_start) { + bytes += increase_sectors_start; + addr -= increase_sectors_start; + } + + if (increase_sectors_end) + bytes += increase_sectors_end; + } + + /* Get number of IOs to be issued */ + ios_count = DIV_ROUND_UP(bytes, max_io_size); + + atoms = ocf_mpool_new(atomic_io_allocator, + ios_count); + if (!atoms) + return NULL; + + CAS_DEBUG_PARAM("Addr = %llu, bytes = %u", io->addr, io->bytes); + + /* setup master IO */ + atomic_set(&atoms->req_remaining, ios_count); + + atoms->count = ios_count; + atoms->cmpl_fn = io->end; + atoms->cmpl_context = io; + + for (i = 0; i < ios_count; i++) { + struct cas_atomic_io *this = &atoms[i]; + + this->master = atoms; + this->addr = addr; + this->bytes = min(bytes, max_io_size); + this->dir = dir; + this->flags = io->flags; + this->volume = io->volume; + + CAS_DEBUG_PARAM("Sub-atomic IO (%u), Addr = %llu, bytes = %u", + i, this->addr, this->bytes); + + addr += this->bytes; + bytes -= this->bytes; + + /* Allocate BIO data vector with pages */ + 
this->bvec_size = cas_atomic_size_of(this->bytes); + this->bvec_size = DIV_ROUND_UP(this->bvec_size, PAGE_SIZE); + + if (write_zero || increase_sectors_start || + increase_sectors_end) + this->data = cas_ctx_data_zalloc(this->bvec_size); + else + this->data = cas_ctx_data_alloc(this->bvec_size); + + if (!this->data) + goto cas_atomic_alloc_ERROR; + + /* Set length of last page */ + this->data->vec[this->bvec_size - 1].bv_len = + cas_atomic_size_of(this->bytes) % PAGE_SIZE; + + CAS_DEBUG_PARAM("Sub-atomic IO (%u), BIO vector size = %u, " + "alignment %u", i, this->bvec_size, + this->data->vec[this->bvec_size - 1].bv_len); + + this->start = min(this->bytes, increase_sectors_start); + increase_sectors_start -= this->start; + } + BUG_ON(bytes); + + for (i = ios_count; i && increase_sectors_end; i--) { + struct cas_atomic_io *this = &atoms[i - 1]; + + this->end = min(this->bytes, increase_sectors_end); + increase_sectors_end -= this->end; + } + + return atoms; + +cas_atomic_alloc_ERROR: + + if (atoms) + cas_atomic_dealloc(atoms); + + return NULL; +} + +static int cas_atomic_rd_complete(struct cas_atomic_io *atom) +{ + struct bio_vec_iter *dst, src; + uint32_t copied; + const uint32_t size = OCF_ATOMIC_METADATA_SIZE; + + uint32_t bytes = atom->bytes; + + CAS_DEBUG_TRACE(); + + /* Initialize iterators */ + cas_io_iter_init(&src, atom->data->vec, atom->bvec_size); + dst = &atom->iter; + + BUG_ON(bytes % SECTOR_SIZE); + BUG_ON(size != OCF_ATOMIC_METADATA_SIZE); + + copied = 0; + while (bytes) { + /* Copy data */ + copied += cas_io_iter_cpy(dst, &src, SECTOR_SIZE); + + /* Omit metadata */ + copied += cas_io_iter_move(&src, size); + + bytes -= SECTOR_SIZE; + } + + /* Validate if copied proper numbers of bytes */ + if (copied != cas_atomic_size_of(atom->bytes)) { + CAS_DEBUG_PARAM("ERROR, copied %u, expected = %u", + copied, cas_atomic_size_of(atom->bytes)); + /* Metadata and data coping problem */ + return -EIO; + } + + return 0; +} + +static int 
cas_atomic_rd_metadata_complete(struct cas_atomic_io *atom) +{ + struct bio_vec_iter *dst, src; + uint32_t copied; + const uint32_t size = OCF_ATOMIC_METADATA_SIZE; + + uint32_t bytes = atom->bytes; + + CAS_DEBUG_TRACE(); + + /* Initialize iterators */ + cas_io_iter_init(&src, atom->data->vec, atom->bvec_size); + dst = &atom->iter; + + BUG_ON(bytes % SECTOR_SIZE); + BUG_ON(size != OCF_ATOMIC_METADATA_SIZE); + + copied = 0; + while (bytes) { + /* Copy data */ + copied += cas_io_iter_move(&src, SECTOR_SIZE); + + /* Omit metadata */ + copied += cas_io_iter_cpy(dst, &src, size); + + bytes -= SECTOR_SIZE; + } + + /* Validate if copied proper numbers of bytes */ + if (copied != cas_atomic_size_of(atom->bytes)) { + CAS_DEBUG_PARAM("ERROR, copied %u, expected = %u", + copied, cas_atomic_size_of(atom->bytes)); + /* Metadata and data coping problem */ + return -EIO; + } + + return 0; +} + +static int cas_atomic_rd_prepare(struct ocf_io *io, + struct cas_atomic_io *atom) +{ + struct blkio *blkio = cas_io_to_blkio(io); + uint32_t moved; + + /* Store BIO vector iterator, when read completed then it will be + * known were place data + */ + cas_io_iter_copy_set(&atom->iter, &blkio->iter); + + /* Move iterator for next IO */ + moved = cas_io_iter_move(&blkio->iter, atom->bytes); + + /* Validate if there is enough space in BIO data vector to do read */ + if (moved != atom->bytes) { + CAS_DEBUG_PARAM("ERROR, moved %u, expected = %u", + moved, cas_atomic_size_of(atom->bytes)); + return -EINVAL; + } + + return 0; +} + +static int cas_atomic_wr_prepare(struct ocf_io *io, + struct cas_atomic_io *atom) +{ + struct blkio *blkio = cas_io_to_blkio(io); + ocf_cache_t cache; + struct ocf_atomic_metadata metadata; + struct bio_vec_iter dst, src; + uint32_t copied, added; + + uint64_t addr = atom->addr; + uint32_t bytes = atom->bytes; + + cache = ocf_volume_get_cache(io->volume); + + /* Initialize iterators */ + cas_io_iter_init(&dst, atom->data->vec, atom->bvec_size); + 
cas_io_iter_copy_set(&src, &blkio->iter); + + BUG_ON(!cache); + BUG_ON(bytes % SECTOR_SIZE); + + copied = 0; + if (atom->start) { + added = cas_atomic_size_of(atom->start); + cas_io_iter_move(&dst, added); + + bytes -= atom->start; + copied = added; + + addr += atom->start; + } + + if (atom->end) { + added = cas_atomic_size_of(atom->end); + bytes -= atom->end; + copied += added; + } + + BUG_ON(sizeof(metadata) != OCF_ATOMIC_METADATA_SIZE); + + while (bytes) { + /* Get metadata */ + if (ocf_metadata_get_atomic_entry(cache, addr, &metadata)) + break; + + /* Copy data */ + copied += cas_io_iter_cpy(&dst, &src, SECTOR_SIZE); + + /* Copy metadata */ + copied += cas_io_iter_cpy_from_data(&dst, &metadata, + sizeof(metadata)); + + bytes -= SECTOR_SIZE; + addr += SECTOR_SIZE; + } + + cas_io_iter_move(&blkio->iter, atom->bytes - (atom->start + atom->end)); + + /* Validate if copied proper numbers of bytes */ + if (copied != cas_atomic_size_of(atom->bytes)) { + CAS_DEBUG_PARAM("ERROR, copied %u, expected = %u", + copied, cas_atomic_size_of(atom->bytes)); + /* Metadata and data coping problem */ + return -EINVAL; + } + + return 0; +} + +static int cas_atomic_rd_metadata_prepare(struct ocf_io *io, + struct cas_atomic_io *atom) +{ + struct blkio *blkio = cas_io_to_blkio(io); + uint32_t moved; + + BUG_ON(io->dir != OCF_READ); + + atom->metadata = true; + + /* Store BIO vector iterator, when read completed then it will be + * known were place data + */ + cas_io_iter_copy_set(&atom->iter, &blkio->iter); + + /* Move iterator for next IO */ + moved = cas_io_iter_move(&blkio->iter, (atom->bytes / SECTOR_SIZE) * + OCF_ATOMIC_METADATA_SIZE); + + /* Validate if copied proper numbers of bytes */ + if (moved != (atom->bytes / SECTOR_SIZE) * + OCF_ATOMIC_METADATA_SIZE) { + CAS_DEBUG_PARAM("ERROR, copied %u, expected = %u", + moved, cas_atomic_size_of(atom->bytes)); + /* Metadata and data coping problem */ + return -EIO; + } + + return 0; +} + +static void cas_atomic_end_atom(struct 
cas_atomic_io *atom, int error) +{ + struct cas_atomic_io *master = atom->master; + struct ocf_io *io = master->cmpl_context; + + if (error) + master->error |= error; + + if (atomic_dec_return(&master->req_remaining)) + return; + + CAS_DEBUG_MSG("Completion"); + + /* Send completion to caller */ + master->cmpl_fn(io, master->error); + + /* Free allocated structures */ + cas_atomic_dealloc(master); + ocf_io_put(io); +} + +static DECLARE_BLOCK_CALLBACK(cas_atomic_fire_atom, struct bio *bio, + unsigned int bytes, int error) +{ + int err; + struct cas_atomic_io *atom; + struct bd_object *bdobj; + + BUG_ON(!bio); + BUG_ON(!bio->bi_private); + err = BLOCK_CALLBACK_ERROR(bio, error); + atom = bio->bi_private; + BUG_ON(!atom->master); + bdobj = bd_object(atom->volume); + + CAS_DEBUG_PARAM("BIO result = %d", BLOCK_CALLBACK_ERROR(bio, error)); + + if (err != 0) + goto out; + + if (atom->discard) + goto out; + + if (atom->metadata) { + if (cas_atomic_rd_metadata_complete(atom)) + atom->master->error = -EIO; + goto out; + } + + switch (atom->dir) { + case OCF_READ: + if (cas_atomic_rd_complete(atom)) + atom->master->error = -EIO; + break; + case OCF_WRITE: + if (!cas_blk_is_flush_io(atom->flags)) { + atomic_inc(&bdobj->potentially_dirty); + } else { + /* IO flush finished, update potential + * dirty state + */ + atomic_sub(atomic_read(&atom->potential_dirty), + &bdobj->potentially_dirty); + } + break; + } + +out: + /* Free BIO, no needed any more */ + BUG_ON(bio != atom->bio); + bio_put(bio); + atom->bio = NULL; + + cas_atomic_end_atom(atom, err); +} + +static void _cas_atomic_setup_cmd( + ocf_volume_t volume, + struct request *req, + struct bio* bio, + uint64_t bytes, + int dir, + void *end_io_data, + struct nvme_command *cmd) +{ + struct bd_object *bdobj = bd_object(volume); + unsigned int ns_id = bdobj->atomic_params.nsid; + unsigned long *cmd_addr = blk_mq_rq_to_pdu(req); + + cmd->rw.opcode = (dir == OCF_WRITE) ? 
nvme_cmd_write : nvme_cmd_read; + cmd->rw.nsid = cpu_to_le32(ns_id); + cmd->rw.slba = cpu_to_le64(BIO_BISECTOR(bio)); + cmd->rw.length = cpu_to_le16((bytes / SECTOR_SIZE) - 1); + cmd->rw.control = cpu_to_le16(NVME_RW_LR); + + req->cmd_type = REQ_TYPE_DRV_PRIV; + req->cmd_flags |= REQ_FAILFAST_DRIVER; + + *cmd_addr = (unsigned long)cmd; + + req->timeout = ADMIN_TIMEOUT; /* TODO Use timeout for regular IO */ + + req->cmd = (unsigned char *) cmd; + req->cmd_len = sizeof(*cmd); + req->special = NULL; + req->end_io_data = end_io_data; +} + +static void cas_atomic_setup_cmd(int dir, struct cas_atomic_io *atom) +{ + _cas_atomic_setup_cmd(atom->volume, atom->request, atom->bio, + atom->bytes, dir, atom, &atom->cmd); +} + +static void cas_atomic_end_request(struct request *request, int error) +{ + struct cas_atomic_io *atom; + + BUG_ON(!request); + atom = request->end_io_data; + + /* Free request not needed any more */ + BUG_ON(atom->request != request); + blk_mq_free_request(request); + atom->request = NULL; + + CAS_DEBUG_PARAM("RQ result = %d", error); + + cas_atomic_end_atom(atom, error); +} + +static void cas_atomic_fire_atom(int dir, struct ocf_io *io, + struct cas_atomic_io *atom) +{ + struct bd_object *bdobj = bd_object(atom->volume); + struct block_device *bdev = bdobj->btm_bd; + struct request_queue *queue = bdev_get_queue(bdev); + + struct bio *bio; + struct bio_vec *bvec; + uint32_t i; + + /* Allocate BIO */ + bio = atom->bio = bio_alloc(GFP_NOIO, atom->bvec_size); + if (!bio) + goto _fire_atom_ERROR; + + /* Setup BIO */ + bio->bi_bdev = bdev; + BIO_BISECTOR(bio) = atom->addr / SECTOR_SIZE; + bio->bi_next = NULL; + bio->bi_private = atom; + BIO_OP_FLAGS(bio) |= io->flags; + bio->bi_end_io = REFER_BLOCK_CALLBACK(cas_atomic_fire_atom); + + /* Add pages to the BIO */ + bvec = atom->data->vec; + for (i = 0; i < atom->bvec_size; i++, bvec++) { + int added = bio_add_pc_page(queue, bio, + bvec->bv_page, bvec->bv_len, bvec->bv_offset); + + if (added != bvec->bv_len) { + 
/* Oops, a problem, cannot add page to the BIO */ + goto _fire_atom_ERROR; + } + } + + /* Allocate request */ + atom->request = cas_blk_make_request(queue, atom->bio, GFP_NOIO); + if (IS_ERR(atom->request)) { + atom->request = NULL; + goto _fire_atom_ERROR; + } + + /* Setup command */ + cas_atomic_setup_cmd(dir, atom); + + /* Additional completion for request */ + atomic_inc(&atom->master->req_remaining); + + /* Send requests (NVME atomic command) */ + blk_execute_rq_nowait(queue, NULL, atom->request, 0, + cas_atomic_end_request); + + return; + +_fire_atom_ERROR: + CAS_DEBUG_MSG("ERROR"); + cas_atomic_end_atom(atom, -EIO); +} + +static void cas_atomic_fire_atoms(int dir, struct ocf_io *io, + struct cas_atomic_io *atoms) +{ + uint32_t i; + + ocf_io_get(io); + + for (i = 0; i < atoms->count; i++) { + struct cas_atomic_io *this = &atoms[i]; + + CAS_DEBUG_PARAM("Fire(%u), Addr = %llu, bytes = %u", + i, this->addr, this->bytes); + cas_atomic_fire_atom(dir, io, this); + } +} + +typedef int (*cas_prepare_atom_pfn_t)(struct ocf_io *io, + struct cas_atomic_io *atoms); + +static int cas_atomic_prepare_atoms(struct ocf_io *io, + cas_prepare_atom_pfn_t prepare, + struct cas_atomic_io *atoms) +{ + int i; + int result = 0; + + if (!prepare) + return 0; + + for (i = 0; i < atoms->count; i++) { + struct cas_atomic_io *this = &atoms[i]; + + CAS_DEBUG_PARAM("Sub-atomic IO preparation(%u), Addr = %llu, " + "bytes = %u, dir = %d", i, this->addr, + this->bytes, dir); + + result |= prepare(io, this); + } + + return result; +} + +static void cas_atomic_fire_io(struct ocf_io *io, + cas_prepare_atom_pfn_t prepare, + bool write_zero) +{ + int dir = io->dir; + + /* Create atomic IOs context, mainly allocations */ + struct cas_atomic_io *atoms = cas_atomic_alloc(dir, io, write_zero); + + if (!atoms) { + CAS_DEBUG_MSG("Memory allocation ERROR"); + goto _submit_io_ERROR; + } + + /* Prepare IOs, mainly coping data */ + if (cas_atomic_prepare_atoms(io, prepare, atoms)) { + 
CAS_DEBUG_MSG("Preparation ERROR"); + goto _submit_io_ERROR; + } + + /* Send IO */ + atomic_inc(&atoms->req_remaining); + cas_atomic_fire_atoms(dir, io, atoms); + cas_atomic_end_atom(atoms, 0); + + return; + +_submit_io_ERROR: + if (atoms) + cas_atomic_dealloc(atoms); + + io->end(io, -EIO); +} + +static void cas_atomic_submit_flush_bio(struct cas_atomic_io *atom) +{ + struct request *req = atom->request; + struct bd_object *bdobj = bd_object(atom->volume); + unsigned int ns_id = bdobj->atomic_params.nsid; + struct nvme_command *cmd = &atom->cmd; + unsigned long *cmd_addr = blk_mq_rq_to_pdu(req); + + cmd->rw.opcode = nvme_cmd_flush; + cmd->rw.nsid = cpu_to_le32(ns_id); + + *cmd_addr = (unsigned long)cmd; + + req->cmd_type = REQ_TYPE_DRV_PRIV; + + req->timeout = ADMIN_TIMEOUT; + + req->cmd = (unsigned char *) cmd; + req->cmd_len = sizeof(*cmd); + req->special = NULL; + req->end_io_data = atom; + + /* Additional completion for request */ + atomic_inc(&atom->master->req_remaining); + + /* Send NVMe flush command */ + blk_execute_rq_nowait(req->q, NULL, req, 0, cas_atomic_end_request); +} + +static int cas_atomic_submit_discard_bio(struct cas_atomic_io *atom) +{ + struct request *req = atom->request; + struct nvme_command *cmd = &atom->cmd; + struct bd_object *bdobj = bd_object(atom->volume); + unsigned int ns_id = bdobj->atomic_params.nsid; + struct nvme_dsm_range *nvm_discard; + struct page *page; + int offset; + unsigned long *cmd_addr = blk_mq_rq_to_pdu(req); + + nvm_discard = kmalloc(sizeof(*nvm_discard), GFP_ATOMIC); + if (!nvm_discard) { + return -ENOMEM; + } + + nvm_discard->cattr = cpu_to_le32(0); + nvm_discard->nlb = cpu_to_le32(BIO_BISIZE(atom->bio) >> SECTOR_SHIFT); + nvm_discard->slba = cpu_to_le64(BIO_BISECTOR(atom->bio)); + + cmd->dsm.opcode = nvme_cmd_dsm; + cmd->dsm.nsid = cpu_to_le32(ns_id); + cmd->dsm.nr = 0; + cmd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); + + req->completion_data = nvm_discard; + page = virt_to_page(nvm_discard); + offset = 
offset_in_page(nvm_discard); + blk_add_request_payload(req, page, offset, sizeof(*nvm_discard)); + + req->__sector = BIO_BISECTOR(atom->bio); + req->__data_len = BIO_BISIZE(atom->bio); + req->ioprio = bio_prio(atom->bio); + + req->timeout = ADMIN_TIMEOUT; + req->end_io_data = atom; + req->cmd_type = REQ_TYPE_DRV_PRIV; + req->cmd_flags = CAS_BIO_DISCARD; + + req->errors = 0; + + *cmd_addr = (unsigned long)cmd; + + /* Additional completion for request */ + atomic_inc(&atom->master->req_remaining); + + /* Send NVMe flush command */ + blk_execute_rq_nowait(req->q, NULL, req, 0, cas_atomic_end_request); + + return 0; +} + +static int cas_atomic_special_req_prepare(struct cas_atomic_io *atom, + struct ocf_io *io) +{ + struct bd_object *bdobj = bd_object(io->volume); + struct block_device *bdev = bdobj->btm_bd; + + CAS_DEBUG_TRACE(); + atom->master = atom; + atom->count = 1; + atom->cmpl_fn = io->end; + atom->cmpl_context = io; + atom->volume = io->volume; + atom->flags = io->flags; + atomic_set(&atom->req_remaining, 1); + + atom->bio = bio_alloc(GFP_NOIO, 1); + if (!atom->bio) { + CAS_PRINT_RL(KERN_ERR "Couldn't allocate memory for bio\n"); + return -ENOMEM; + } + + atom->bio->bi_end_io = REFER_BLOCK_CALLBACK(cas_atomic_fire_atom); + atom->bio->bi_bdev = bdev; + atom->bio->bi_private = atom; + + return 0; +} + +void cas_atomic_submit_discard(struct ocf_io *io) +{ + struct bd_object *bdobj = bd_object(io->volume); + struct block_device *bdev = bdobj->btm_bd; + struct request_queue *q = bdev_get_queue(bdev); + int result = 0; + + struct cas_atomic_io *atom = NULL; + struct blkio *blkio = cas_io_to_blkio(io); + + CAS_DEBUG_TRACE(); + + if (!q) { + /* No queue, error */ + io->end(io, -EINVAL); + return; + } + + /* Allocate and setup control structure. 
*/ + atom = ocf_mpool_new(atomic_io_allocator, 1); + if (!atom) { + CAS_PRINT_RL(KERN_ERR "Couldn't allocate memory for IO ctrl\n"); + io->end(io, -ENOMEM); + return; + } + + result = cas_atomic_special_req_prepare(atom, io); + if (result) { + blkio->error = result; + goto out; + } + + /* Increase IO reference counter for FLUSH IO */ + ocf_io_get(io); + + /* Set up specific field */ + atom->discard = true; + BIO_OP_FLAGS(atom->bio) = CAS_BIO_DISCARD; + BIO_BISECTOR(atom->bio) = io->addr / SECTOR_SIZE; + BIO_BISIZE(atom->bio) = io->bytes; + + atom->request = cas_blk_make_request(q, atom->bio, GFP_NOIO); + if (IS_ERR(atom->request)) { + blkio->error = PTR_ERR(atom->request); + goto out; + } + + atomic_inc(&atom->req_remaining); + result = cas_atomic_submit_discard_bio(atom); + if (result) + blkio->error = result; + +out: + cas_atomic_end_atom(atom, blkio->error); +} + +void cas_atomic_submit_flush(struct ocf_io *io) +{ +#ifdef CAS_FLUSH_SUPPORTED + struct bd_object *bdobj = bd_object(io->volume); + struct block_device *bdev = bdobj->btm_bd; + struct request_queue *q = bdev_get_queue(bdev); + int result = 0; + struct cas_atomic_io *atom = NULL; + struct blkio *blkio = cas_io_to_blkio(io); + + CAS_DEBUG_TRACE(); + + blkio->dirty = atomic_read(&bdobj->potentially_dirty); + + if (!blkio->dirty) { + /* Didn't write anything to underlying disk; + * no need to send req_flush + */ + io->end(io, 0); + return; + } + + if (!q) { + io->end(io, -EINVAL); + return; + } + + if (!CHECK_QUEUE_FLUSH(q)) { + /* This block device does not support flush */ + atomic_sub(blkio->dirty, &bdobj->potentially_dirty); + io->end(io, 0); + return; + } + + /* Allocate and setup control structure. 
*/ + atom = ocf_mpool_new(atomic_io_allocator, 1); + if (!atom) { + CAS_PRINT_RL(KERN_ERR "Couldn't allocate memory for IO ctrl\n"); + io->end(io, -ENOMEM); + return; + } + + /* Increase IO reference counter for FLUSH IO */ + ocf_io_get(io); + + result = cas_atomic_special_req_prepare(atom, io); + if (result) { + CAS_PRINT_RL(CAS_KERN_ERR "Couldn't allocate memory for BIO\n"); + blkio->error = -ENOMEM; + goto out; + } + + /* Set up specific field */ + atom->dir = OCF_WRITE; + atomic_set(&atom->potential_dirty, blkio->dirty); + + atom->request = cas_blk_make_request(q, atom->bio, GFP_NOIO); + if (IS_ERR(atom->request)) { + blkio->error = PTR_ERR(atom->request); + goto out; + } + + atomic_inc(&atom->req_remaining); + cas_atomic_submit_flush_bio(atom); + +out: + cas_atomic_end_atom(atom, blkio->error); +#else + /* Running operating system without support for REQ_FLUSH + * (i.e. SLES 11 SP 1) CAS cannot use flushing requests to handle + * power-fail safe Write-Back + */ + struct blkio *bdio = cas_io_to_blkio(io); + + io->end(io, -EINVAL); + /* on SLES 11 SP 1 powerfail safety can only be achieved through + * disabling volatile write cache of disk itself. + */ +#endif +} + +void cas_atomic_submit_io(struct ocf_io *io) +{ + CAS_DEBUG_TRACE(); + + if (!CAS_IS_WRITE_FLUSH_FUA(io->flags) && + CAS_IS_WRITE_FLUSH(io->flags)) { + /* FLUSH */ + cas_atomic_submit_flush(io); + return; + } + + if (unlikely(!io->bytes)) { + CAS_PRINT_RL(KERN_ERR "Zero length request\n"); + io->end(io, -EINVAL); + return; + } + + cas_atomic_fire_io(io, io->dir == OCF_READ ? 
cas_atomic_rd_prepare : + cas_atomic_wr_prepare, false); +} + +void cas_atomic_submit_metadata(struct ocf_io *io) +{ + BUG_ON(io->dir != OCF_READ); + + CAS_DEBUG_TRACE(); + + if (unlikely(!io->bytes)) { + CAS_PRINT_RL(CAS_KERN_ERR "Zero length request\n"); + io->end(io, -EINVAL); + return; + } + + cas_atomic_fire_io(io, cas_atomic_rd_metadata_prepare, false); +} + +unsigned int cas_atomic_get_max_io_size(ocf_volume_t volume) +{ + struct block_device *bd; + + if (!volume) + return 0; + + bd = bd_object(volume)->btm_bd; + if (!bd->bd_disk) + return 0; + + return queue_max_sectors(bd->bd_disk->queue); +} + +void cas_atomic_close_object(ocf_volume_t volume) +{ + struct bd_object *bdobj = bd_object(volume); + + if(bdobj->workqueue) + destroy_workqueue(bdobj->workqueue); + + block_dev_close_object(volume); +} + +int cas_atomic_open_object(ocf_volume_t volume) +{ + int result; + uint8_t type; + struct bd_object *bdobj = NULL; + + result = block_dev_open_object(volume); + if (result) + return result; + + bdobj = bd_object(volume); + + result = cas_blk_identify_type_by_bdev(bdobj->btm_bd, + &type, &bdobj->atomic_params); + + if (type != ATOMIC_DEVICE_VOLUME) { + cas_atomic_close_object(volume); + result = -OCF_ERR_INVAL_VOLUME_TYPE; + goto end; + } + + bdobj->workqueue = create_workqueue("CAS_AT_ZER"); + if (!bdobj->workqueue) { + cas_atomic_close_object(volume); + result = -ENOMEM; + goto end; + } + +end: + return result; +} + +uint64_t cas_atomic_get_length(ocf_volume_t volume) +{ + struct bd_object *bdobj = bd_object(volume); + + return bdobj->atomic_params.size; +} + +/* context to keep track of write_zero progress across child IOs */ +struct cas_atomic_write_zero_ctx +{ + struct ocf_io *sub_io; + struct ocf_io *original_io; + struct work_struct cmpl_work; + unsigned step_size; +}; + +static void 
_cas_atomic_write_zeroes_end(struct cas_atomic_write_zero_ctx *ctx, + int error) +{ + struct ocf_io *io = ctx->original_io; + + /* end master io */ + io->end(io, error); + ocf_io_put(io); + + /* cleanup context */ + ocf_io_put(ctx->sub_io); + kfree(ctx); +} + +/* atomic write zerores I/O completion */ +static void _cas_atomic_write_zeroes_step_cmpl(struct ocf_io *io, int error) +{ + struct cas_atomic_write_zero_ctx *ctx = io->priv1; + struct bd_object *bdobj = bd_object(io->volume); + const unsigned bytes_processed = (io->addr - ctx->original_io->addr) + + io->bytes; + const unsigned bytes_left = ctx->original_io->bytes - bytes_processed; + + BUG_ON(io->bytes > ctx->step_size); + + /* update I/O address and size */ + io->addr += io->bytes; + io->bytes = min(bytes_left, ctx->step_size); + + if (!bytes_left || error) { + _cas_atomic_write_zeroes_end(ctx, error); + } else { + /* submit next IO from work context */ + queue_work(bdobj->workqueue, &ctx->cmpl_work); + } +} + +/* work routine to schedule next portion of write zero I/O */ +void _cas_atomic_write_zeroes_work(struct work_struct *work) +{ + struct cas_atomic_write_zero_ctx *ctx = container_of(work, + struct cas_atomic_write_zero_ctx, cmpl_work); + + cas_atomic_fire_io(ctx->sub_io, NULL, true); +} + +void cas_atomic_submit_write_zeroes(struct ocf_io *io) +{ + /* send 8 atoms in each I/O */ + const unsigned step_size = min(cas_atomic_max_io_sectors() + * SECTOR_SIZE * 8, io->bytes); + struct cas_atomic_write_zero_ctx *ctx = NULL; + int result = 0; + + if (unlikely(!io->bytes)) { + CAS_PRINT_RL(CAS_KERN_ERR "Zero length request\n"); + result = -EINVAL; + goto error; + } + + ctx = kmalloc(sizeof(*ctx), GFP_NOIO); + if (!ctx) { + result = -ENOMEM; + goto error; + } + + ctx->sub_io = ocf_volume_new_io(io->volume); + if (!ctx->sub_io) { + result = -ENOMEM; + goto error_after_ctx; + } + + /* set up context */ + ctx->step_size = step_size; + ctx->original_io = io; + INIT_WORK(&ctx->cmpl_work, 
_cas_atomic_write_zeroes_work); + + /* get reference to original io */ + ocf_io_get(io); + + /* set up sub-io */ + ocf_io_configure(ctx->sub_io, io->addr, + min(io->bytes, ctx->step_size), + OCF_WRITE, 0, 0); + ocf_io_set_cmpl(ctx->sub_io, ctx, NULL, _cas_atomic_write_zeroes_step_cmpl); + + cas_atomic_fire_io(ctx->sub_io, NULL, true); + + return; + +error_after_ctx: + kfree(ctx); +error: + io->end(io, result); +} + +const struct ocf_volume_properties cas_object_atomic_properties = { + .name = "Atomic Writes NVMe", + .io_priv_size = sizeof(struct blkio), + .volume_priv_size = sizeof(struct bd_object), + .caps = { + .atomic_writes = 1, + }, + .ops = { + .submit_io = cas_atomic_submit_io, + .submit_flush = cas_atomic_submit_flush, + .submit_discard = cas_atomic_submit_discard, + .submit_metadata = cas_atomic_submit_metadata, + .submit_write_zeroes = cas_atomic_submit_write_zeroes, + .open = cas_atomic_open_object, + .close = block_dev_close_object, + .get_max_io_size = cas_atomic_get_max_io_size, + .get_length = cas_atomic_get_length, + }, + .io_ops = { + .set_data = cas_blk_io_set_data, + .get_data = cas_blk_io_get_data, + }, +}; + +int atomic_dev_init(void) +{ + int ret; + + ret = ocf_ctx_register_volume_type(cas_ctx, ATOMIC_DEVICE_VOLUME, + &cas_object_atomic_properties); + + if (ret < 0) + return -EINVAL; + + atomic_io_allocator = ocf_mpool_create(NULL, 0, + sizeof(struct cas_atomic_io), GFP_NOIO, 1, "cas_atomic_io"); + + if (!atomic_io_allocator) { + ocf_ctx_unregister_volume_type(cas_ctx, ATOMIC_DEVICE_VOLUME); + return -ENOMEM; + } + + return 0; +} + +void atomic_dev_deinit(void) +{ + if (atomic_io_allocator) { + ocf_mpool_destroy(atomic_io_allocator); + atomic_io_allocator = NULL; + } + + 
ocf_ctx_unregister_volume_type(cas_ctx, ATOMIC_DEVICE_VOLUME); +} + +#else + +int atomic_dev_init(void) +{ + return 0; +} + +void atomic_dev_deinit(void) +{ +} + +#endif diff --git a/modules/cas_cache/volume/vol_atomic_dev_bottom.h b/modules/cas_cache/volume/vol_atomic_dev_bottom.h new file mode 100644 index 000000000..1d5ae1c60 --- /dev/null +++ b/modules/cas_cache/volume/vol_atomic_dev_bottom.h @@ -0,0 +1,31 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __VOL_ATOMIC_DEV_BOTTOM_H__ +#define __VOL_ATOMIC_DEV_BOTTOM_H__ + +#include "../cas_cache.h" + +enum atomic_metadata_mode { + ATOMIC_METADATA_MODE_ELBA, + ATOMIC_METADATA_MODE_SEPBUF, + ATOMIC_METADATA_MODE_NONE, +}; + +struct atomic_dev_params { + unsigned int nsid; + uint64_t size; + enum atomic_metadata_mode metadata_mode; + unsigned is_mode_optimal : 1; + + /* IMPORTANT: If this field is 0, the other fields are invalid! 
*/ + unsigned is_atomic_capable : 1; +}; + +int atomic_dev_init(void); + +void atomic_dev_deinit(void); + +#endif /* __VOL_ATOMIC_DEV_BOTTOM_H__ */ diff --git a/modules/cas_cache/volume/vol_blk_utils.c b/modules/cas_cache/volume/vol_blk_utils.c new file mode 100644 index 000000000..7109856d2 --- /dev/null +++ b/modules/cas_cache/volume/vol_blk_utils.c @@ -0,0 +1,470 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include "vol_blk_utils.h" + +static void cas_io_iter_advanced(struct bio_vec_iter *iter, uint32_t bytes) +{ + BUG_ON(bytes > iter->len); + + iter->len -= bytes; + iter->offset += bytes; + + if (iter->len) { + /* Still in this item, bytes to be processed */ + return; + } + + /* Move to next item in data vector */ + iter->idx++; + if (iter->idx < iter->vec_size) { + iter->ivec = &iter->vec[iter->idx]; + iter->len = iter->ivec->bv_len; + iter->offset = iter->ivec->bv_offset; + } else { + iter->ivec = NULL; + iter->len = 0; + iter->offset = 0; + } +} + +uint32_t cas_io_iter_cpy(struct bio_vec_iter *dst, struct bio_vec_iter *src, + uint32_t bytes) +{ + uint32_t to_copy, written = 0; + void *adst, *asrc; + + if (dst->idx >= dst->vec_size) + return 0; + + BUG_ON(dst->offset + dst->len > PAGE_SIZE); + + if (src->idx >= src->vec_size) + return 0; + + BUG_ON(src->offset + src->len > PAGE_SIZE); + + while (bytes) { + to_copy = min(dst->len, src->len); + to_copy = min(to_copy, bytes); + if (to_copy == 0) { + /* No more bytes for coping */ + break; + } + + adst = page_address(dst->ivec->bv_page) + dst->offset; + asrc = page_address(src->ivec->bv_page) + src->offset; + + memcpy(adst, asrc, to_copy); + + bytes -= to_copy; + written += to_copy; + + cas_io_iter_advanced(dst, to_copy); + cas_io_iter_advanced(src, to_copy); + } + + return written; +} + +uint32_t cas_io_iter_cpy_from_data(struct bio_vec_iter *dst, + const void *src, uint32_t bytes) +{ + uint32_t to_copy, written = 0; + void *adst; + const void *asrc; + 
+ if (dst->idx >= dst->vec_size) + return 0; + + BUG_ON(dst->offset + dst->len > PAGE_SIZE); + + while (bytes) { + to_copy = min(dst->len, bytes); + if (to_copy == 0) { + /* No more bytes for coping */ + break; + } + + adst = page_address(dst->ivec->bv_page) + dst->offset; + asrc = src + written; + + memcpy(adst, asrc, to_copy); + + bytes -= to_copy; + written += to_copy; + + cas_io_iter_advanced(dst, to_copy); + } + + return written; +} + +uint32_t cas_io_iter_cpy_to_data(void *dst, struct bio_vec_iter *src, + uint32_t bytes) +{ + uint32_t to_copy, written = 0; + void *adst, *asrc; + + BUG_ON(dst == NULL); + + if (src->idx >= src->vec_size) + return 0; + + BUG_ON(src->offset + src->len > PAGE_SIZE); + + while (bytes) { + to_copy = min(bytes, src->len); + if (to_copy == 0) { + /* No more bytes for coping */ + break; + } + + adst = dst + written; + asrc = page_address(src->ivec->bv_page) + src->offset; + + memcpy(adst, asrc, to_copy); + + bytes -= to_copy; + written += to_copy; + + cas_io_iter_advanced(src, to_copy); + } + + return written; +} + +uint32_t cas_io_iter_move(struct bio_vec_iter *iter, uint32_t bytes) +{ + uint32_t to_move, moved = 0; + + if (iter->idx >= iter->vec_size) + return 0; + + BUG_ON(iter->offset + iter->len > PAGE_SIZE); + + while (bytes) { + to_move = min(iter->len, bytes); + if (to_move == 0) { + /* No more bytes for coping */ + break; + } + + bytes -= to_move; + moved += to_move; + + cas_io_iter_advanced(iter, to_move); + } + + return moved; +} + +uint32_t cas_io_iter_zero(struct bio_vec_iter *dst, uint32_t bytes) +{ + uint32_t to_fill, zeroed = 0; + void *adst; + + if (dst->idx >= dst->vec_size) + return 0; + + BUG_ON(dst->offset + dst->len > PAGE_SIZE); + + while (bytes) { + to_fill = min(dst->len, (typeof(dst->len))PAGE_SIZE); + if (to_fill == 0) { + /* No more bytes for coping */ + break; + } + + adst = page_address(dst->ivec->bv_page) + dst->offset; + + memset(adst, 0, to_fill); + + bytes -= to_fill; + zeroed += to_fill; + + 
cas_io_iter_advanced(dst, to_fill); + } + + return zeroed; +} + +/* + * + */ +int cas_blk_io_set_data(struct ocf_io *io, + ctx_data_t *ctx_data, uint32_t offset) +{ + struct blkio *blkio = cas_io_to_blkio(io); + struct blk_data *data = ctx_data; + + /* Set BIO vector (IO data) and initialize iterator */ + blkio->data = data; + if (blkio->data) { + cas_io_iter_init(&blkio->iter, blkio->data->vec, + blkio->data->size); + + /* Move into specified offset in BIO vector iterator */ + if (offset != cas_io_iter_move(&blkio->iter, offset)) { + /* TODO Log message */ + blkio->error = -ENOBUFS; + return -ENOBUFS; + } + } + + return 0; +} + +/* + * + */ +ctx_data_t *cas_blk_io_get_data(struct ocf_io *io) +{ + struct blkio *blkio = cas_io_to_blkio(io); + + return blkio->data; +} + +#if defined(CAS_NVME_PARTIAL) + +#include "utils/utils_nvme.h" + +int cas_blk_identify_type_by_bdev(struct block_device *bdev, + uint8_t *type, struct atomic_dev_params *atomic_params) +{ + struct nvme_id_ns *ns; + unsigned int nsid, selected, ms, ds, pi, elba, sbsupp; + long long int ret = 0; + struct atomic_dev_params atomic_params_int = {0}; + + ns = kmalloc(sizeof(*ns), GFP_KERNEL); + if (!ns) + return -OCF_ERR_NO_MEM; + + ret = cas_nvme_get_nsid(bdev, &nsid); + if (ret < 0) { + /* + * We cannot obtain NSID which means we are not dealing with + * NVMe device + */ + goto out1; + } + + ret = cas_nvme_identify_ns(bdev, nsid, ns); + if (ret < 0) { + /* + * We cannot obtain ns structure which means we ARE dealing with + * NVMe device but can not recognize format so let's treat that + * device as block device + */ + goto out1; + } + + selected = ns->flbas & 0xf; + ms = ns->lbaf[selected].ms; + ds = ns->lbaf[selected].ds; + pi = ns->dps & 0x7; + elba = !!(ns->flbas & (1<<4)); + sbsupp = !!(ns->mc & (1<<1)); + + atomic_params_int.is_atomic_capable = 1; + atomic_params_int.nsid = nsid; + atomic_params_int.size = (ns->nsze << (ds - 9)) * SECTOR_SIZE; + + if (pi != 0) { + /* We don't support formats which 
have + * enable Protection Information feature. + */ + ret = -KCAS_ERR_NVME_BAD_FORMAT; + goto out2; + } + + switch (ms) { + case 0: + /* NVMe metadata features disabled, so we handle it as + * regular block device + */ + + if (ds != 9 && ds != 12) { + ret = -KCAS_ERR_NVME_BAD_FORMAT; + goto out2; + } + + *type = BLOCK_DEVICE_VOLUME; + atomic_params_int.metadata_mode = ATOMIC_METADATA_MODE_NONE; + +#if !defined(CAS_NVME_FULL) + /* + * Only partial support user can't using + * device in atomic mode, so mode is optimal + */ + atomic_params_int.is_mode_optimal = 1; + break; +#else + if (bdev == bdev->bd_contains) { + /* + * Entire device - format isn't optimal + */ + atomic_params_int.is_mode_optimal = 0; + } else { + /* + * Partition - format is optimal, user can't using + * partitions in atomic mode + */ + atomic_params_int.is_mode_optimal = 1; + } + break; + + case 8: + /* For atomic writes we support only metadata size 8B and + * data size 512B + */ + + if (ds != 9) { + ret = -KCAS_ERR_NVME_BAD_FORMAT; + goto out2; + } + + *type = ATOMIC_DEVICE_VOLUME; + atomic_params_int.metadata_mode = elba ? + ATOMIC_METADATA_MODE_ELBA : + ATOMIC_METADATA_MODE_SEPBUF; + atomic_params_int.is_mode_optimal = sbsupp ? 
!elba : 1; + break; +#endif + + default: + ret = -KCAS_ERR_NVME_BAD_FORMAT; + } + + if (atomic_params) + *atomic_params = atomic_params_int; + + goto out2; +out1: + *type = BLOCK_DEVICE_VOLUME; + ret = 0; +out2: + kfree(ns); + return ret; +} + +static inline int _cas_detect_blk_type(const char *path, uint8_t *type, + struct atomic_dev_params *atomic_params) +{ + int ret; + struct block_device *bdev; + char holder[] = "CAS DETECT\n"; + + bdev = OPEN_BDEV_EXCLUSIVE(path, FMODE_READ, holder); + if (IS_ERR(bdev)) + return -OCF_ERR_NOT_OPEN_EXC; + + ret = cas_blk_identify_type_by_bdev(bdev, type, atomic_params); + CLOSE_BDEV_EXCLUSIVE(bdev, FMODE_READ); + return ret; +} + +#else + +static inline int _cas_detect_blk_type(const char *path, uint8_t *type, + struct atomic_dev_params *atomic_params) +{ + /* + * NVMe is not supported with given kernel version, so we + * have no way to figure out what the current NVMe format + * is. In this situation we make a naive assumption that + * it's formatted to LBA size 512B, and try to treat it + * as regular block device. 
+ */ + *type = BLOCK_DEVICE_VOLUME; + return 0; +} + +int cas_blk_identify_type_by_bdev(struct block_device *bdev, + uint8_t *type, struct atomic_dev_params *atomic_params) +{ + *type = BLOCK_DEVICE_VOLUME; + return 0; +} +#endif + +int cas_blk_open_volume_by_bdev(ocf_volume_t *vol, + struct block_device *bdev) +{ + struct atomic_dev_params atomic_params = {0}; + struct bd_object *bdobj; + uint8_t type; + int ret; + + ret = cas_blk_identify_type_by_bdev(bdev, &type, &atomic_params); + if (ret) + goto err; + + ret = ocf_ctx_volume_create(cas_ctx, vol, NULL, type); + if (ret) + goto err; + + bdobj = bd_object(*vol); + + bdobj->btm_bd = bdev; + bdobj->opened_by_bdev = true; + + ocf_volume_open(*vol); + + return 0; + +err: + return ret; +} + +void cas_blk_close_volume(ocf_volume_t vol) +{ + ocf_volume_close(vol); + ocf_volume_deinit(vol); + env_free(vol); +} + +int _cas_blk_identify_type(const char *path, uint8_t *type, + struct atomic_dev_params *atomic_params) +{ + struct file *file; + int result = 0; + + file = filp_open(path, O_RDONLY, 0); + if (IS_ERR(file)) + return -OCF_ERR_INVAL_VOLUME_TYPE; + + if (S_ISBLK(FILE_INODE(file)->i_mode)) + *type = BLOCK_DEVICE_VOLUME; + else if (S_ISCHR(FILE_INODE(file)->i_mode)) + *type = NVME_CONTROLLER; + else + result = -OCF_ERR_INVAL_VOLUME_TYPE; + + filp_close(file, 0); + if (result) + return result; + + if (*type == BLOCK_DEVICE_VOLUME) { + result = _cas_detect_blk_type(path, type, atomic_params); + if (result < 0) + return result; + } + + return 0; +} + +int cas_blk_identify_type(const char *path, uint8_t *type) +{ + return _cas_blk_identify_type(path, type, NULL); +} + +int cas_blk_identify_type_atomic(const char *path, uint8_t *type, + struct atomic_dev_params *atomic_params) +{ + return _cas_blk_identify_type(path, type, atomic_params); +} + diff --git a/modules/cas_cache/volume/vol_blk_utils.h b/modules/cas_cache/volume/vol_blk_utils.h new file mode 100644 index 000000000..8b8ec4b93 --- /dev/null +++ 
b/modules/cas_cache/volume/vol_blk_utils.h @@ -0,0 +1,148 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __VOL_BLK_UTILS_H__ +#define __VOL_BLK_UTILS_H__ + +#include "obj_blk.h" +#include "context.h" + +static inline bool cas_blk_is_flush_io(unsigned long flags) +{ + if ((flags & OCF_WRITE_FLUSH) == OCF_WRITE_FLUSH) + return true; + + if ((flags & OCF_WRITE_FLUSH_FUA) == OCF_WRITE_FLUSH_FUA) + return true; + + return false; +} + +struct blkio { + int error; + atomic_t rq_remaning; + atomic_t ref_counter; + int32_t dirty; + int32_t dir; + + struct blk_data *data; /* IO data buffer */ + + /* BIO vector iterator for sending IO */ + struct bio_vec_iter iter; +}; + +static inline struct blkio *cas_io_to_blkio(struct ocf_io *io) +{ + return ocf_io_get_priv(io); +} + +int cas_blk_io_set_data(struct ocf_io *io, ctx_data_t *data, + uint32_t offset); +ctx_data_t *cas_blk_io_get_data(struct ocf_io *io); + +int cas_blk_identify_type_by_bdev(struct block_device *bdev, + uint8_t *type, struct atomic_dev_params *atomic_params); + +int cas_blk_open_volume_by_bdev(ocf_volume_t *vol, + struct block_device *bdev); +void cas_blk_close_volume(ocf_volume_t vol); + +int cas_blk_identify_type(const char *path, uint8_t *type); + +int cas_blk_identify_type_atomic(const char *path, uint8_t *type, + struct atomic_dev_params *atomic_params); + +static inline void cas_io_iter_init(struct bio_vec_iter *iter, + struct bio_vec *vec, uint32_t vec_size) +{ + iter->vec = iter->ivec = vec; + iter->vec_size = vec_size; + iter->idx = 0; + iter->offset = vec->bv_offset; + iter->len = vec->bv_len; +} + +static inline void cas_io_iter_set(struct bio_vec_iter *iter, + struct bio_vec *vec, uint32_t vec_size, + uint32_t idx, uint32_t offset, uint32_t len) +{ + iter->vec = vec; + iter->vec_size = vec_size; + iter->idx = idx; + iter->offset = offset; + iter->len = len; + + if (iter->idx < vec_size) { + iter->ivec = &vec[iter->idx]; + } else { + 
iter->ivec = NULL; + WARN(1, "Setting offset out of BIO vector"); + } +} + +static inline void cas_io_iter_copy_set(struct bio_vec_iter *dst, + struct bio_vec_iter *src) +{ + dst->vec = src->vec; + dst->vec_size = src->vec_size; + dst->idx = src->idx; + dst->offset = src->offset; + dst->len = src->len; + dst->ivec = src->ivec; +} + +static inline bool cas_io_iter_is_next(struct bio_vec_iter *iter) +{ + return iter->idx < iter->vec_size ? true : false; + /* TODO UNITTEST */ +} + +static inline uint32_t cas_io_iter_size_done(struct bio_vec_iter *iter) +{ + return iter->idx; + /* TODO UNITTEST */ +} + +static inline uint32_t cas_io_iter_size_left(struct bio_vec_iter *iter) +{ + if (iter->idx < iter->vec_size) + return iter->vec_size - iter->idx; + return 0; + /* TODO UNITTEST */ +} + +static inline uint32_t cas_io_iter_current_offset(struct bio_vec_iter *iter) +{ + return iter->idx < iter->vec_size ? iter->offset : 0; + /* TODO UNITTEST */ +} + +static inline uint32_t cas_io_iter_current_length(struct bio_vec_iter *iter) +{ + return iter->idx < iter->vec_size ? iter->len : 0; + /* TODO UNITTEST */ +} + +static inline struct page *cas_io_iter_current_page(struct bio_vec_iter *iter) +{ + return iter->idx < iter->vec_size ? 
iter->ivec->bv_page : NULL; + /* TODO UNITTEST */ +} + +uint32_t cas_io_iter_cpy(struct bio_vec_iter *dst, struct bio_vec_iter *src, + uint32_t bytes); + +uint32_t cas_io_iter_cpy_from_data(struct bio_vec_iter *dst, + const void *src, uint32_t bytes); + +uint32_t cas_io_iter_cpy_to_data(void *dst, struct bio_vec_iter *src, + uint32_t bytes); + +uint32_t cas_io_iter_move(struct bio_vec_iter *iter, + uint32_t bytes); + +uint32_t cas_io_iter_zero(struct bio_vec_iter *iter, uint32_t bytes); + +#endif /* __VOL_BLK_UTILS_H__ */ diff --git a/modules/cas_cache/volume/vol_block_dev_bottom.c b/modules/cas_cache/volume/vol_block_dev_bottom.c new file mode 100644 index 000000000..2323e5e24 --- /dev/null +++ b/modules/cas_cache/volume/vol_block_dev_bottom.c @@ -0,0 +1,597 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include "cas_cache.h" + +#define CAS_DEBUG_IO 0 + +#if CAS_DEBUG_IO == 1 +#define CAS_DEBUG_TRACE() printk(KERN_DEBUG \ + "[IO] %s:%d\n", __func__, __LINE__) + +#define CAS_DEBUG_MSG(msg) printk(KERN_DEBUG \ + "[IO] %s:%d - %s\n", __func__, __LINE__, msg) + +#define CAS_DEBUG_PARAM(format, ...) printk(KERN_DEBUG \ + "[IO] %s:%d - "format"\n", __func__, __LINE__, ##__VA_ARGS__) +#else +#define CAS_DEBUG_TRACE() +#define CAS_DEBUG_MSG(msg) +#define CAS_DEBUG_PARAM(format, ...) +#endif + +int block_dev_open_object(ocf_volume_t vol) +{ + struct bd_object *bdobj = bd_object(vol); + const struct ocf_volume_uuid *uuid = ocf_volume_get_uuid(vol); + struct casdsk_disk *dsk; + + if (bdobj->opened_by_bdev) { + /* Bdev has been set manually, so there is nothing to do. 
*/ + return 0; + } + + if (unlikely(true == cas_upgrade_is_in_upgrade())) { + dsk = casdisk_functions.casdsk_disk_claim(uuid->data, NULL); + casdisk_functions.casdsk_disk_set_attached(dsk); + } else { + dsk = casdisk_functions.casdsk_disk_open(uuid->data, NULL); + } + + if (IS_ERR_OR_NULL(dsk)) { + int error = PTR_ERR(dsk) ?: -EINVAL; + + if (error == -EBUSY) + error = -OCF_ERR_NOT_OPEN_EXC; + + return error; + } + + bdobj->dsk = dsk; + bdobj->btm_bd = casdisk_functions.casdsk_disk_get_blkdev(dsk); + + return 0; +} + +void block_dev_close_object(ocf_volume_t vol) +{ + struct bd_object *bdobj = bd_object(vol); + + if (bdobj->opened_by_bdev) + return; + + if (likely(!cas_upgrade_is_in_upgrade())) { + casdisk_functions.casdsk_disk_close(bdobj->dsk); + } else { + casdisk_functions.casdsk_disk_set_pt(bdobj->dsk); + casdisk_functions.casdsk_disk_dettach(bdobj->dsk); + } +} + +unsigned int block_dev_get_max_io_size(ocf_volume_t vol) +{ + struct bd_object *bdobj = bd_object(vol); + struct block_device *bd = bdobj->btm_bd; + + return queue_max_sectors(bd->bd_disk->queue) << SECTOR_SHIFT; +} + +uint64_t block_dev_get_byte_length(ocf_volume_t vol) +{ + struct bd_object *bdobj = bd_object(vol); + struct block_device *bd = bdobj->btm_bd; + uint64_t sector_length; + + sector_length = (bd->bd_contains == bd) ? 
+ get_capacity(bd->bd_disk) : + bd->bd_part->nr_sects; + + return sector_length << SECTOR_SHIFT; +} + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 3, 0) +static char *__block_dev_get_elevator_name(struct request_queue *q) +{ + if (q->elevator->elevator_type == NULL) + return NULL; + + if (q->elevator->elevator_type->elevator_name == NULL) + return NULL; + + if (q->elevator->elevator_type->elevator_name[0] == 0) + return NULL; + + return q->elevator->elevator_type->elevator_name; +} +#else +static char *__block_dev_get_elevator_name(struct request_queue *q) +{ + if (q->elevator->type == NULL) + return NULL; + + if (q->elevator->type->elevator_name == NULL) + return NULL; + + if (q->elevator->type->elevator_name[0] == 0) + return NULL; + + return q->elevator->type->elevator_name; +} +#endif + +/* + * + */ +const char *block_dev_get_elevator_name(struct request_queue *q) +{ + if (!q) + return NULL; + + if (q->elevator == NULL) + return NULL; + + return __block_dev_get_elevator_name(q); +} + +/* + * + */ +int block_dev_is_metadata_mode_optimal(struct atomic_dev_params *atomic_params, + uint8_t type) +{ + if (type == BLOCK_DEVICE_VOLUME) { + if (atomic_params->is_atomic_capable) + return atomic_params->is_mode_optimal; + } else if (type == ATOMIC_DEVICE_VOLUME) { + return atomic_params->is_mode_optimal; + } + + return 1; +} + +/* + * + */ +static inline struct bio *cas_bd_io_alloc_bio(struct blkio *bdio) +{ + struct bio *bio + = bio_alloc(GFP_NOIO, cas_io_iter_size_left(&bdio->iter)); + + if (bio) + return bio; + + if (cas_io_iter_size_left(&bdio->iter) < MAX_LINES_PER_IO) { + /* BIO vector was small, so it was memory + * common problem - NO RAM!!! 
+ */ + return NULL; + } + + /* Retry with smaller */ + return bio_alloc(GFP_NOIO, MAX_LINES_PER_IO); +} + +/* + * + */ +static void cas_bd_io_end(struct ocf_io *io, int error) +{ + struct blkio *bdio = cas_io_to_blkio(io); + + if (error) + bdio->error |= error; + + if (atomic_dec_return(&bdio->rq_remaning)) + return; + + CAS_DEBUG_MSG("Completion"); + + /* Send completion to caller */ + io->end(io, bdio->error); + + /* Free allocated structures */ + ocf_io_put(io); +} + +/* + * + */ +DECLARE_BLOCK_CALLBACK(cas_bd_io_end, struct bio *bio, + unsigned int bytes_done, int error) +{ + struct ocf_io *io; + struct blkio *bdio; + struct bd_object *bdobj; + int err; + + BUG_ON(!bio); + BUG_ON(!bio->bi_private); + BLOCK_CALLBACK_INIT(bio); + io = bio->bi_private; + bdobj = bd_object(io->volume); + BUG_ON(!bdobj); + err = BLOCK_CALLBACK_ERROR(bio, error); + bdio = cas_io_to_blkio(io); + BUG_ON(!bdio); + + CAS_DEBUG_TRACE(); + + if (err) + goto out; + + if (bdio->dir == OCF_WRITE) { + /* IO was a write */ + + if (!cas_blk_is_flush_io(io->flags)) { + /* Device cache is dirty, mark it */ + atomic_inc(&bdobj->potentially_dirty); + } else { + /* IO flush finished, update potential + * dirty state + */ + atomic_sub(bdio->dirty, &bdobj->potentially_dirty); + } + } +out: + if (err == -EOPNOTSUPP && (BIO_OP_FLAGS(bio) & CAS_BIO_DISCARD)) + err = 0; + + cas_bd_io_end(io, err); + + bio_put(bio); + BLOCK_CALLBACK_RETURN(); +} + +static void block_dev_submit_flush(struct ocf_io *io) +{ +#ifdef CAS_FLUSH_SUPPORTED + struct blkio *blkio = cas_io_to_blkio(io); + struct bd_object *bdobj = bd_object(io->volume); + struct block_device *bdev = bdobj->btm_bd; + struct request_queue *q = bdev_get_queue(bdev); + struct bio *bio = NULL; + + blkio->dirty = atomic_read(&bdobj->potentially_dirty); + + /* Prevent races of completing IO */ + atomic_set(&blkio->rq_remaning, 1); + + /* Increase IO reference counter for FLUSH IO */ + ocf_io_get(io); + + if (!blkio->dirty) { + /* Didn't write anything to 
underlying disk; no need to + * send req_flush + */ + goto out; + } + + if (q == NULL) { + /* No queue, error */ + blkio->error = -EINVAL; + goto out; + } + + if (!CHECK_QUEUE_FLUSH(q)) { + /* This block device does not support flush, call back */ + atomic_sub(blkio->dirty, &bdobj->potentially_dirty); + goto out; + } + + bio = bio_alloc(GFP_NOIO, 0); + if (bio == NULL) { + CAS_PRINT_RL(KERN_ERR "Couldn't allocate memory for BIO\n"); + blkio->error = -ENOMEM; + goto out; + } + + blkio->dir = io->dir; + + bio->bi_end_io = REFER_BLOCK_CALLBACK(cas_bd_io_end); + CAS_BIO_SET_DEV(bio, bdev); + bio->bi_private = io; + + atomic_inc(&blkio->rq_remaning); + cas_submit_bio(OCF_WRITE_FLUSH, bio); + +out: + cas_bd_io_end(io, blkio->error); + +#else + /* Running operating system without support for REQ_FLUSH + * (i.e. SLES 11 SP 1) CAS cannot use flushing requests to + * handle power-fail safe Write-Back + */ + io->end(io, -ENOTSUPP); + + /* on SLES 11 SP 1 powerfail safety can only be achieved + * through disabling volatile write cache of disk itself. 
+ */ +#endif +} + +void block_dev_submit_discard(struct ocf_io *io) +{ + struct blkio *blkio = cas_io_to_blkio(io); + struct bd_object *bdobj = bd_object(io->volume); + struct block_device *bd = bdobj->btm_bd; + struct request_queue *q = bdev_get_queue(bd); + struct bio *bio = NULL; + + unsigned int max_discard_sectors, granularity, bio_sects; + int alignment; + sector_t sects, start, end, tmp; + + /* Prevent races of completing IO */ + atomic_set(&blkio->rq_remaning, 1); + + /* Increase IO reference counter for FLUSH IO */ + ocf_io_get(io); + + if (!q) { + /* No queue, error */ + blkio->error = -ENXIO; + goto out; + } + + if (!blk_queue_discard(q)) { + /* Discard is not supported by bottom device, send completion + * to caller + */ + goto out; + } + + granularity = max(q->limits.discard_granularity >> SECTOR_SHIFT, 1U); + alignment = (bdev_discard_alignment(bd) >> SECTOR_SHIFT) % granularity; + max_discard_sectors = + min(q->limits.max_discard_sectors, UINT_MAX >> SECTOR_SHIFT); + max_discard_sectors -= max_discard_sectors % granularity; + if (unlikely(!max_discard_sectors)) + goto out; + + sects = io->bytes >> SECTOR_SHIFT; + start = io->addr >> SECTOR_SHIFT; + + while (sects) { + bio = bio_alloc(GFP_NOIO, 1); + if (!bio) { + CAS_PRINT_RL(CAS_KERN_ERR "Couldn't allocate memory for BIO\n"); + blkio->error = -ENOMEM; + break; + } + + bio_sects = min_t(sector_t, sects, max_discard_sectors); + end = start + bio_sects; + tmp = end; + if (bio_sects < sects && + sector_div(tmp, granularity) != alignment) { + end = end - alignment; + sector_div(end, granularity); + end = end * granularity + alignment; + bio_sects = end - start; + } + + CAS_BIO_SET_DEV(bio, bd); + BIO_BISECTOR(bio) = start; + BIO_BISIZE(bio) = bio_sects << SECTOR_SHIFT; + bio->bi_next = NULL; + bio->bi_private = io; + bio->bi_end_io = REFER_BLOCK_CALLBACK(cas_bd_io_end); + + atomic_inc(&blkio->rq_remaning); + cas_submit_bio(CAS_BIO_DISCARD, bio); + + sects -= bio_sects; + start = end; + + cond_resched(); 
+ } + +out: + cas_bd_io_end(io, blkio->error); +} + +static inline bool cas_bd_io_prepare(int *dir, struct ocf_io *io) +{ + struct blkio *bdio = cas_io_to_blkio(io); + struct bd_object *bdobj = bd_object(io->volume); + + /* Setup DIR */ + bdio->dir = *dir; + + /* Save dirty counter */ + bdio->dirty = atomic_read(&bdobj->potentially_dirty); + + /* Convert CAS direction into kernel values */ + switch (bdio->dir) { + case OCF_READ: + *dir = READ; + break; + + case OCF_WRITE: + *dir = WRITE; + break; + + default: + bdio->error = -EINVAL; + break; + } + + if (!io->bytes) { + /* Do not accept empty requests */ + CAS_PRINT_RL(KERN_ERR "Invalid zero size IO\n"); + bdio->error = -EINVAL; + } + + if (bdio->error) + return false; + + return true; +} + +/* + * + */ +static void block_dev_submit_io(struct ocf_io *io) +{ + struct blkio *bdio = cas_io_to_blkio(io); + struct bd_object *bdobj = bd_object(io->volume); + struct bio_vec_iter *iter = &bdio->iter; + uint64_t addr = io->addr; + uint32_t bytes = io->bytes; + int dir = io->dir; + + if (!CAS_IS_WRITE_FLUSH_FUA(io->flags) && + CAS_IS_WRITE_FLUSH(io->flags)) { + CAS_DEBUG_MSG("Flush request"); + /* It is a flush request, handle it */ + block_dev_submit_flush(io); + return; + } + + CAS_DEBUG_PARAM("Address = %llu, bytes = %u\n", bdio->addr, + bdio->bytes); + + /* Increase IO reference */ + ocf_io_get(io); + + /* Prevent races of completing IO */ + atomic_set(&bdio->rq_remaning, 1); + + if (!cas_bd_io_prepare(&dir, io)) { + CAS_DEBUG_MSG("Invalid request"); + cas_bd_io_end(io, -EINVAL); + return; + } + + while (cas_io_iter_is_next(iter) && bytes) { + /* Still IO vectors to be sent */ + + /* Allocate BIO */ + struct bio *bio = cas_bd_io_alloc_bio(bdio); + + if (!bio) { + bdio->error = -ENOMEM; + break; + } + + /* Setup BIO */ + CAS_BIO_SET_DEV(bio, bdobj->btm_bd); + BIO_BISECTOR(bio) = addr / SECTOR_SIZE; + bio->bi_next = NULL; + bio->bi_private = io; + BIO_OP_FLAGS(bio) |= io->flags; + BIO_SET_RW_FLAGS(bio); + bio->bi_end_io = 
REFER_BLOCK_CALLBACK(cas_bd_io_end); + + /* Add pages */ + while (cas_io_iter_is_next(iter) && bytes) { + struct page *page = cas_io_iter_current_page(iter); + uint32_t offset = cas_io_iter_current_offset(iter); + uint32_t length = cas_io_iter_current_length(iter); + int added; + + if (length > bytes) + length = bytes; + + added = bio_add_page(bio, page, length, offset); + BUG_ON(added < 0); + + if (added == 0) { + /* No more space in BIO, stop adding pages */ + break; + } + + /* Update address, bytes sent */ + bytes -= added; + addr += added; + + /* Update BIO vector iterator */ + if (added != cas_io_iter_move(iter, added)) { + bdio->error = -ENOBUFS; + break; + } + } + + if (bdio->error == 0) { + /* Increase IO reference for sending this IO */ + atomic_inc(&bdio->rq_remaning); + + /* Send BIO */ + CAS_DEBUG_MSG("Submit IO"); + cas_submit_bio(dir, bio); + bio = NULL; + } else { + if (bio) { + bio_put(bio); + bio = NULL; + } + + /* Error, stop processing */ + break; + } + } + + if (bytes && bdio->error == 0) { + /* Not all bytes sent, mark error */ + bdio->error = -ENOBUFS; + } + + /* Prevent races of completing IO when + * there are still child IOs not being sent. 
+ */ + cas_bd_io_end(io, 0); +} + +const struct ocf_volume_properties cas_object_blk_properties = { + .name = "Block Device", + .io_priv_size = sizeof(struct blkio), + .volume_priv_size = sizeof(struct bd_object), + .caps = { + .atomic_writes = 0, /* Atomic writes not supported */ + }, + .ops = { + .submit_io = block_dev_submit_io, + .submit_flush = block_dev_submit_flush, + .submit_metadata = NULL, + .submit_discard = block_dev_submit_discard, + .open = block_dev_open_object, + .close = block_dev_close_object, + .get_max_io_size = block_dev_get_max_io_size, + .get_length = block_dev_get_byte_length, + }, + .io_ops = { + .set_data = cas_blk_io_set_data, + .get_data = cas_blk_io_get_data, + }, +}; + +int block_dev_init(void) +{ + int ret; + + ret = ocf_ctx_register_volume_type(cas_ctx, BLOCK_DEVICE_VOLUME, + &cas_object_blk_properties); + if (ret < 0) + return ret; + + return 0; +} +void block_dev_deinit(void) +{ + ocf_ctx_unregister_volume_type(cas_ctx, BLOCK_DEVICE_VOLUME); +} + +int block_dev_try_get_io_class(struct bio *bio, int *io_class) +{ + struct ocf_io *io; + + if (bio->bi_end_io != REFER_BLOCK_CALLBACK(cas_bd_io_end)) + return -1; + + io = bio->bi_private; + *io_class = io->io_class; + return 0; +} diff --git a/modules/cas_cache/volume/vol_block_dev_bottom.h b/modules/cas_cache/volume/vol_block_dev_bottom.h new file mode 100644 index 000000000..6e1798493 --- /dev/null +++ b/modules/cas_cache/volume/vol_block_dev_bottom.h @@ -0,0 +1,26 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __VOL_BLOCK_DEV_BOTTOM_H__ +#define __VOL_BLOCK_DEV_BOTTOM_H__ + +#include "../cas_cache.h" + +int block_dev_open_object(ocf_volume_t vol); + +void block_dev_close_object(ocf_volume_t vol); + +const char *block_dev_get_elevator_name(struct request_queue *q); + +int block_dev_is_metadata_mode_optimal(struct atomic_dev_params *atomic_params, + uint8_t type); + +int block_dev_try_get_io_class(struct bio *bio, int 
*io_class); + +int block_dev_init(void); + +void block_dev_deinit(void); + +#endif /* __VOL_BLOCK_DEV_BOTTOM_H__ */ diff --git a/modules/cas_cache/volume/vol_block_dev_top.c b/modules/cas_cache/volume/vol_block_dev_top.c new file mode 100644 index 000000000..ee5c6935e --- /dev/null +++ b/modules/cas_cache/volume/vol_block_dev_top.c @@ -0,0 +1,1013 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include "cas_cache.h" + +#define BLK_RQ_POS(rq) (BIO_BISECTOR((rq)->bio)) +#define BLK_RQ_BYTES(rq) blk_rq_bytes(rq) + +extern u32 use_io_scheduler; + +static inline void __blockdev_end_request_all(struct request *rq, int error) +{ + __blk_end_request_all(rq, error); +} + +static inline void _blockdev_end_request_all(struct request *rq, int error) +{ + blk_end_request_all(rq, error); +} + +static inline bool _blockdev_can_handle_rq(struct request *rq) +{ + int error = 0; + + if (unlikely(!is_rq_type_fs(rq))) + error = __LINE__; + + if (unlikely(rq->next_rq)) + error = __LINE__; + + if (error != 0) { + CAS_PRINT_RL(KERN_ERR "%s cannot handle request (ERROR %d)\n", + rq->rq_disk->disk_name, error); + return false; + } + + return true; +} + +static inline struct request *_blockdev_peek_request(struct request_queue *q) +{ + return blk_peek_request(q); +} + +static inline void _blockdev_start_request(struct request *rq) +{ + blk_start_request(rq); +} + +static void _blockdev_set_bio_data(struct blk_data *data, struct bio *bio) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) + struct bio_vec *bvec; + uint32_t i = 0; + + bio_for_each_segment(bvec, bio, i) { + BUG_ON(i >= data->size); + data->vec[i] = *bvec; + } +#else + struct bio_vec bvec; + struct bvec_iter iter; + uint32_t i = 0; + + bio_for_each_segment(bvec, bio, iter) { + BUG_ON(i >= data->size); + data->vec[i] = bvec; + i++; + } +#endif +} + +static inline void _blockdev_start_io_acct(struct bio *bio) +{ + struct gendisk *gd = CAS_BIO_GET_DEV(bio); + + 
cas_generic_start_io_acct(gd->queue, bio_data_dir(bio), + bio_sectors(bio), &gd->part0); +} + +static inline void _blockdev_end_io_acct(struct bio *bio, + unsigned long start_time) +{ + struct gendisk *gd = CAS_BIO_GET_DEV(bio); + + cas_generic_end_io_acct(gd->queue, bio_data_dir(bio), + &gd->part0, start_time); +} + +void block_dev_start_bio_fast(struct ocf_io *io) +{ + struct blk_data *data = ocf_io_get_data(io); + struct bio *bio = data->master_io_req; + + _blockdev_start_io_acct(bio); +} + +void block_dev_complete_bio_fast(struct ocf_io *io, int error) +{ + struct blk_data *data = ocf_io_get_data(io); + struct bio *bio = data->master_io_req; + + _blockdev_end_io_acct(bio, data->start_time); + + BIO_ENDIO(bio, BIO_BISIZE(bio), error); + ocf_io_put(io); + cas_free_blk_data(data); +} + +void block_dev_complete_bio_discard(struct ocf_io *io, int error) +{ + struct bio *bio = io->priv1; + + BIO_ENDIO(bio, BIO_BISIZE(bio), error); + ocf_io_put(io); +} + +void block_dev_complete_rq(struct ocf_io *io, int error) + +{ + struct blk_data *data = ocf_io_get_data(io); + struct request *rq = data->master_io_req; + + _blockdev_end_request_all(rq, error); + ocf_io_put(io); + cas_free_blk_data(data); +} + +void block_dev_complete_sub_rq(struct ocf_io *io, int error) +{ + struct blk_data *data = ocf_io_get_data(io); + struct ocf_io *master = data->master_io_req; + struct blk_data *master_data = ocf_io_get_data(master); + + if (error) + master_data->error = error; + + if (atomic_dec_return(&master_data->master_remaining) == 0) { + _blockdev_end_request_all(master_data->master_io_req, + master_data->error); + cas_free_blk_data(master_data); + ocf_io_put(master); + } + + ocf_io_put(io); + cas_free_blk_data(data); +} + +void block_dev_complete_flush(struct ocf_io *io, int error) +{ + struct request *rq = io->priv1; + + _blockdev_end_request_all(rq, error); + ocf_io_put(io); +} + +bool _blockdev_is_request_barier(struct request *rq) +{ + struct bio *i_bio = rq->bio; + + 
for_each_bio(i_bio) { + if (CHECK_BARRIER(i_bio)) + return true; + } + return false; +} + +static int _blockdev_alloc_many_requests(ocf_core_t core, + struct list_head *list, struct request *rq, + struct ocf_io *master) +{ + ocf_cache_t cache = ocf_core_get_cache(core); + struct cache_priv *cache_priv = ocf_cache_get_priv(cache); + int error = 0; + int flags = 0; + struct bio *bio; + struct ocf_io *sub_io; + struct blk_data *master_data = ocf_io_get_data(master); + struct blk_data *data; + + INIT_LIST_HEAD(list); + + /* Go over requests and allocate sub requests */ + bio = rq->bio; + for_each_bio(bio) { + /* Setup BIO flags */ + if (CAS_IS_WRITE_FLUSH_FUA(BIO_OP_FLAGS(bio))) { + /* FLUSH and FUA */ + flags = OCF_WRITE_FLUSH_FUA; + } else if (CAS_IS_WRITE_FUA(BIO_OP_FLAGS(bio))) { + /* FUA */ + flags = OCF_WRITE_FUA; + } else if (CAS_IS_WRITE_FLUSH(BIO_OP_FLAGS(bio))) { + /* FLUSH - It shall be handled in request handler */ + error = -EINVAL; + break; + } else { + flags = 0; + } + + data = cas_alloc_blk_data(bio_segments(bio), GFP_ATOMIC); + if (!data) { + CAS_PRINT_RL(KERN_CRIT "BIO data vector allocation error\n"); + error = -ENOMEM; + break; + } + + _blockdev_set_bio_data(data, bio); + + data->master_io_req = master; + + sub_io = ocf_core_new_io(core); + if (!sub_io) { + cas_free_blk_data(data); + error = -ENOMEM; + break; + } + + data->io = sub_io; + + ocf_io_configure(sub_io, BIO_BISECTOR(bio) << SECTOR_SHIFT, + BIO_BISIZE(bio), (bio_data_dir(bio) == READ) ? 
+ OCF_READ : OCF_WRITE, + cas_cls_classify(cache, bio), flags); + + error = ocf_io_set_data(sub_io, data, 0); + if (error) { + ocf_io_put(sub_io); + cas_free_blk_data(data); + break; + } + + ocf_io_set_queue(sub_io, cache_priv->io_queues[smp_processor_id()]); + ocf_io_set_cmpl(sub_io, NULL, NULL, block_dev_complete_sub_rq); + + list_add_tail(&data->list, list); + atomic_inc(&master_data->master_remaining); + } + + if (error) { + CAS_PRINT_RL(KERN_ERR "Cannot handle request (ERROR %d)\n", error); + + /* Go over list and free all */ + while (!list_empty(list)) { + data = list_first_entry(list, struct blk_data, list); + list_del(&data->list); + + sub_io = data->io; + ocf_io_put(sub_io); + cas_free_blk_data(data); + } + } + + return error; +} + +static void _blockdev_set_request_data(struct blk_data *data, struct request *rq) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) + struct req_iterator iter; + struct bio_vec *bvec; + uint32_t i = 0; + + rq_for_each_segment(bvec, rq, iter) { + BUG_ON(i >= data->size); + data->vec[i] = *bvec; + i++; + } +#else + struct req_iterator iter; + struct bio_vec bvec; + uint32_t i = 0; + + rq_for_each_segment(bvec, rq, iter) { + BUG_ON(i >= data->size); + data->vec[i] = bvec; + i++; + } +#endif +} + +/** + * @brief push flush request upon execution queue for given core device + */ +static int _blkdev_handle_flush_request(struct request *rq, ocf_core_t core) +{ + struct ocf_io *io; + ocf_cache_t cache = ocf_core_get_cache(core); + struct cache_priv *cache_priv = ocf_cache_get_priv(cache); + + io = ocf_core_new_io(core); + if (!io) + return -ENOMEM; + + ocf_io_configure(io, 0, 0, OCF_WRITE, 0, OCF_WRITE_FLUSH); + + ocf_io_set_queue(io, cache_priv->io_queues[smp_processor_id()]); + ocf_io_set_cmpl(io, rq, NULL, block_dev_complete_flush); + + ocf_core_submit_flush(io); + + return 0; +} + +#ifdef RQ_CHECK_CONTINOUS +static inline bool _bvec_is_mergeable(struct bio_vec *bv1, struct bio_vec *bv2) +{ + if (bv1 == NULL) + return true; + + 
if (BIOVEC_PHYS_MERGEABLE(bv1, bv2)) + return true; + + return !bv2->bv_offset && !((bv1->bv_offset + bv1->bv_len) % PAGE_SIZE); +} +#endif + +static uint32_t _blkdev_scan_request(ocf_cache_t cache, struct request *rq, + struct ocf_io *io, bool *single_io) +{ + uint32_t size = 0; + struct req_iterator iter; + struct bio *bio_prev = NULL; + uint32_t io_class; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + struct bio_vec bvec; +#ifdef RQ_CHECK_CONTINOUS + struct bio_vec bvec_prev = { NULL, }; +#endif +#else + struct bio_vec *bvec; +#ifdef RQ_CHECK_CONTINOUS + struct bio_vec *bvec_prev = NULL; +#endif +#endif + + *single_io = true; + + /* Scan BIOs in the request to: + * 1. Count the segments number + * 2. Check if requests contains many IO classes + * 3. Check if request is continuous (when process kernel stack is 8KB) + */ + rq_for_each_segment(bvec, rq, iter) { + /* Increase BIO data vector counter */ + size++; + + if (*single_io == false) { + /* Already detected complex request */ + continue; + } + +#ifdef RQ_CHECK_CONTINOUS + /* + * If request is not continous submit each bio as separate + * request, and prevent nvme driver from splitting requests. + * For large requests, nvme splitting causes stack overrun. 
+ */ + if (!_bvec_is_mergeable(SEGMENT_BVEC(bvec_prev), + SEGMENT_BVEC(bvec))) { + *single_io = false; + continue; + } + bvec_prev = bvec; +#endif + + if (bio_prev == iter.bio) + continue; + + bio_prev = iter.bio; + + /* Get class ID for given BIO */ + io_class = cas_cls_classify(cache, iter.bio); + + if (io->io_class != io_class) { + /* + * Request contains BIO with different IO classes and + * need to handle BIO separately + */ + *single_io = false; + } + } + + return size; +} + +static int _blkdev_handle_request(struct request *rq, ocf_core_t core) +{ + ocf_cache_t cache = ocf_core_get_cache(core); + struct cache_priv *cache_priv = ocf_cache_get_priv(cache); + struct ocf_io *io; + struct blk_data *data; + int master_flags = 0; + bool single_io; + uint32_t size; + int ret; + + if (_blockdev_is_request_barier(rq)) { + CAS_PRINT_RL(KERN_WARNING + "special bio was sent,not supported!\n"); + return -ENOTSUPP; + } + + if ((rq->cmd_flags & REQ_FUA) && RQ_IS_FLUSH(rq)) { + /* FLUSH and FUA */ + master_flags = OCF_WRITE_FLUSH_FUA; + } else if (rq->cmd_flags & REQ_FUA) { + /* FUA */ + master_flags = OCF_WRITE_FUA; + } else if (RQ_IS_FLUSH(rq)) { + /* FLUSH */ + return _blkdev_handle_flush_request(rq, core); + } + + io = ocf_core_new_io(core); + if (!io) { + CAS_PRINT_RL(KERN_CRIT "Out of memory. Ending IO processing.\n"); + return -ENOMEM; + } + + ocf_io_configure(io, BLK_RQ_POS(rq) << SECTOR_SHIFT, BLK_RQ_BYTES(rq), + (RQ_DATA_DIR(rq) == RQ_DATA_DIR_WR) ? + OCF_WRITE : OCF_READ, + cas_cls_classify(cache, rq->bio), master_flags); + + size = _blkdev_scan_request(cache, rq, io, &single_io); + + if (unlikely(size == 0)) { + CAS_PRINT_RL(KERN_ERR "Empty IO request\n"); + ocf_io_put(io); + return -EINVAL; + } + + if (single_io) { + data = cas_alloc_blk_data(size, GFP_ATOMIC); + if (data == NULL) { + CAS_PRINT_RL(KERN_CRIT + "Out of memory. 
Ending IO processing.\n"); + ocf_io_put(io); + return -ENOMEM; + } + + _blockdev_set_request_data(data, rq); + + data->master_io_req = rq; + + ret = ocf_io_set_data(io, data, 0); + if (ret) { + ocf_io_put(io); + cas_free_blk_data(data); + return -EINVAL; + } + + ocf_io_set_queue(io, cache_priv->io_queues[smp_processor_id()]); + ocf_io_set_cmpl(io, NULL, NULL, block_dev_complete_rq); + + ocf_core_submit_io(io); + } else { + struct list_head list = LIST_HEAD_INIT(list); + + data = cas_alloc_blk_data(0, GFP_ATOMIC); + if (data == NULL) { + printk(KERN_CRIT + "Out of memory. Ending IO processing.\n"); + ocf_io_put(io); + return -ENOMEM; + } + data->master_io_req = rq; + + if (ocf_io_set_data(io, data, 0)) { + ocf_io_put(io); + cas_free_blk_data(data); + return -EINVAL; + } + + /* Allocate setup and setup */ + ret = _blockdev_alloc_many_requests(core, &list, rq, io); + if (ret < 0) { + printk(KERN_CRIT + "Out of memory. Ending IO processing.\n"); + cas_free_blk_data(data); + ocf_io_put(io); + return -ENOMEM; + } + + BUG_ON(list_empty(&list)); + + /* Go over list and push request to the engine */ + while (!list_empty(&list)) { + struct ocf_io *sub_io; + + data = list_first_entry(&list, struct blk_data, list); + list_del(&data->list); + + sub_io = data->io; + + ocf_core_submit_io(sub_io); + } + } + + return 0; +} + +static inline int _blkdev_can_hndl_bio(struct bio *bio) +{ + if (CHECK_BARRIER(bio)) { + CAS_PRINT_RL(KERN_WARNING + "special bio was sent, not supported!\n"); + BIO_ENDIO(bio, BIO_BISIZE(bio), -EOPNOTSUPP); + return -ENOTSUPP; + } + + return 0; +} + +static inline bool _blkdev_is_flush_fua_bio(struct bio *bio) +{ + if (CAS_IS_WRITE_FLUSH_FUA(BIO_OP_FLAGS(bio))) { + /* FLUSH and FUA */ + return true; + } else if (CAS_IS_WRITE_FUA(BIO_OP_FLAGS(bio))) { + /* FUA */ + return true; + } else if (CAS_IS_WRITE_FLUSH(BIO_OP_FLAGS(bio))) { + /* FLUSH */ + return true; + + } + + return false; +} + +void _blockdev_set_exported_object_flush_fua(ocf_core_t core) +{ +#ifdef 
CAS_FLUSH_SUPPORTED + ocf_cache_t cache = ocf_core_get_cache(core); + ocf_volume_t core_vol = ocf_core_get_volume(core); + ocf_volume_t cache_vol = ocf_cache_get_volume(cache); + struct bd_object *bd_core_vol, *bd_cache_vol; + struct request_queue *core_q, *exp_q, *cache_q; + bool flush, fua; + + bd_core_vol = bd_object(core_vol); + bd_cache_vol = bd_object(cache_vol); + + core_q = casdisk_functions.casdsk_disk_get_queue(bd_core_vol->dsk); + exp_q = casdisk_functions.casdsk_exp_obj_get_queue(bd_core_vol->dsk); + cache_q = casdisk_functions.casdsk_disk_get_queue(bd_cache_vol->dsk); + + flush = (CHECK_QUEUE_FLUSH(core_q) || CHECK_QUEUE_FLUSH(cache_q)); + fua = (CHECK_QUEUE_FUA(core_q) || CHECK_QUEUE_FUA(cache_q)); + + cas_set_queue_flush_fua(exp_q, flush, fua); +#endif +} + +static int _blockdev_calc_discard_alignment(ocf_cache_t cache, + struct block_device *core_bd) +{ + unsigned int granularity, offset; + sector_t start; + + if (core_bd == core_bd->bd_contains) + return 0; + + start = core_bd->bd_part->start_sect; + granularity = ocf_cache_get_line_size(cache) >> SECTOR_SHIFT; + + offset = sector_div(start, granularity); + offset = (granularity - offset) % granularity; + + return offset << SECTOR_SHIFT; +} + +static void _blockdev_set_discard_properties(ocf_cache_t cache, + struct request_queue *exp_q, struct block_device *cache_bd, + struct block_device *core_bd, sector_t core_sectors) +{ + struct request_queue *core_q; + struct request_queue *cache_q; + + core_q = bdev_get_queue(core_bd); + cache_q = bdev_get_queue(cache_bd); + + cas_queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, exp_q); + + CAS_SET_DISCARD_ZEROES_DATA(exp_q->limits, 0); + if (core_q && blk_queue_discard(core_q)) { + blk_queue_max_discard_sectors(exp_q, core_q->limits.max_discard_sectors); + exp_q->limits.discard_alignment = + bdev_discard_alignment(core_bd); + exp_q->limits.discard_granularity = + core_q->limits.discard_granularity; + } else { + blk_queue_max_discard_sectors(exp_q, core_sectors); 
+ exp_q->limits.discard_granularity = + ocf_cache_get_line_size(cache); + exp_q->limits.discard_alignment = + _blockdev_calc_discard_alignment(cache, core_bd); + } +} + +/** + * Map geometry of underlying (core) object geometry (sectors etc.) + * to geometry of exported object. + */ +static int _blockdev_set_geometry(struct casdsk_disk *dsk, void *private) +{ + ocf_core_t core; + ocf_cache_t cache; + ocf_volume_t core_vol; + ocf_volume_t cache_vol; + struct bd_object *bd_cache_vol; + struct request_queue *core_q, *cache_q, *exp_q; + struct block_device *core_bd, *cache_bd; + sector_t sectors; + const char *path; + + BUG_ON(!private); + core = private; + cache = ocf_core_get_cache(core); + core_vol = ocf_core_get_volume(core); + cache_vol = ocf_cache_get_volume(cache); + bd_cache_vol = bd_object(cache_vol); + path = ocf_volume_get_uuid(core_vol)->data; + + core_bd = casdisk_functions.casdsk_disk_get_blkdev(dsk); + BUG_ON(!core_bd); + + cache_bd = casdisk_functions.casdsk_disk_get_blkdev(bd_cache_vol->dsk); + BUG_ON(!cache_bd); + + core_q = core_bd->bd_contains->bd_disk->queue; + cache_q = cache_bd->bd_disk->queue; + exp_q = casdisk_functions.casdsk_exp_obj_get_queue(dsk); + + sectors = ocf_volume_get_length(core_vol) >> SECTOR_SHIFT; + + set_capacity(casdisk_functions.casdsk_exp_obj_get_gendisk(dsk), sectors); + + cas_copy_queue_limits(exp_q, cache_q, core_q); + + if (exp_q->limits.logical_block_size > + core_q->limits.logical_block_size) { + printk(KERN_ERR "Cache device logical sector size is " + "greater than core device %s logical sector size.\n", + path); + return -KCAS_ERR_UNALIGNED; + } + + blk_queue_stack_limits(exp_q, core_q); + + /* We don't want to receive splitted requests*/ + SET_QUEUE_CHUNK_SECTORS(exp_q, 0); + + _blockdev_set_exported_object_flush_fua(core); + + _blockdev_set_discard_properties(cache, exp_q, cache_bd, core_bd, + sectors); + + return 0; +} + +static inline bool _blockdev_is_elevator_inited(struct request_queue *q) +{ + return 
!!block_dev_get_elevator_name(q); +} + +static int _blockdev_prep_rq_fn(struct casdsk_disk *dsk, struct request_queue *q, + struct request *rq, void *private) +{ + ocf_core_t core; + ocf_volume_t obj; + struct bd_object *bvol; + + BUG_ON(!private); + core = private; + obj = ocf_core_get_volume(core); + bvol = bd_object(obj); + BUG_ON(!bvol); + + atomic64_inc(&bvol->pending_rqs); + + return BLKPREP_OK; +} + +static int _blockdev_prepare_queue(struct casdsk_disk *dsk, + struct request_queue *q, void *private) +{ + if (!_blockdev_is_elevator_inited(q)) + return -EINVAL; + + return 0; +} + +static void _blockdev_make_request_discard(struct casdsk_disk *dsk, + struct request_queue *q, struct bio *bio, void *private) +{ + ocf_core_t core = private; + ocf_cache_t cache = ocf_core_get_cache(core); + struct cache_priv *cache_priv = ocf_cache_get_priv(cache); + struct ocf_io *io; + + io = ocf_core_new_io(core); + if (!io) { + CAS_PRINT_RL(KERN_CRIT + "Out of memory. Ending IO processing.\n"); + BIO_ENDIO(bio, BIO_BISIZE(bio), -ENOMEM); + return; + } + + ocf_io_configure(io, BIO_BISECTOR(bio) << SECTOR_SHIFT, BIO_BISIZE(bio), + 0, 0, 0); + + ocf_io_set_queue(io, cache_priv->io_queues[smp_processor_id()]); + ocf_io_set_cmpl(io, bio, NULL, block_dev_complete_bio_discard); + + ocf_core_submit_discard(io); +} + +static int _blockdev_make_request_fast(struct casdsk_disk *dsk, + struct request_queue *q, struct bio *bio, void *private) +{ + ocf_core_t core; + ocf_cache_t cache; + struct cache_priv *cache_priv; + struct ocf_io *io; + struct blk_data *data; + int ret; + + BUG_ON(!private); + core = private; + cache = ocf_core_get_cache(core); + cache_priv = ocf_cache_get_priv(cache); + + if (in_interrupt()) + return CASDSK_BIO_NOT_HANDLED; + + if (_blkdev_can_hndl_bio(bio)) + return CASDSK_BIO_HANDLED; + + if (_blkdev_is_flush_fua_bio(bio)) + return CASDSK_BIO_NOT_HANDLED; + + if (CAS_IS_DISCARD(bio)) { + _blockdev_make_request_discard(dsk, q, bio, private); + return 
CASDSK_BIO_HANDLED; + } + + if (unlikely(BIO_BISIZE(bio) == 0)) { + CAS_PRINT_RL(KERN_ERR + "Not able to handle empty BIO, flags = " + BIO_OP_FLAGS_FORMAT "\n", BIO_OP_FLAGS(bio)); + BIO_ENDIO(bio, BIO_BISIZE(bio), -EINVAL); + return CASDSK_BIO_HANDLED; + } + + data = cas_alloc_blk_data(bio_segments(bio), GFP_ATOMIC); + if (!data) { + CAS_PRINT_RL(KERN_CRIT "BIO data vector allocation error\n"); + BIO_ENDIO(bio, BIO_BISIZE(bio), -ENOMEM); + return CASDSK_BIO_HANDLED; + } + + _blockdev_set_bio_data(data, bio); + + data->master_io_req = bio; + data->start_time = jiffies; + + io = ocf_core_new_io(core); + if (!io) { + printk(KERN_CRIT "Out of memory. Ending IO processing.\n"); + cas_free_blk_data(data); + BIO_ENDIO(bio, BIO_BISIZE(bio), -ENOMEM); + return CASDSK_BIO_HANDLED; + } + + ocf_io_configure(io, BIO_BISECTOR(bio) << SECTOR_SHIFT, BIO_BISIZE(bio), + (bio_data_dir(bio) == READ) ? OCF_READ : OCF_WRITE, + cas_cls_classify(cache, bio), 0); + + ret = ocf_io_set_data(io, data, 0); + if (ret < 0) { + ocf_io_put(io); + cas_free_blk_data(data); + BIO_ENDIO(bio, BIO_BISIZE(bio), -EINVAL); + return CASDSK_BIO_HANDLED; + } + + ocf_io_set_queue(io, cache_priv->io_queues[smp_processor_id()]); + ocf_io_set_cmpl(io, NULL, NULL, block_dev_complete_bio_fast); + ocf_io_set_start(io, block_dev_start_bio_fast); + + ret = ocf_core_submit_io_fast(io); + if (ret < 0) + goto err; + + return CASDSK_BIO_HANDLED; + +err: + /* + * - Not able to processed fast path for this BIO, + * - Cleanup current request + * - Put it to the IO scheduler + */ + ocf_io_put(io); + cas_free_blk_data(data); + + return CASDSK_BIO_NOT_HANDLED; +} + +static void _blockdev_request_fn(struct casdsk_disk *dsk, struct request_queue *q, + void *private) +{ + ocf_core_t core; + ocf_volume_t obj; + struct bd_object *bvol; + struct request *rq; + int result; + + BUG_ON(!private); + core = private; + obj = ocf_core_get_volume(core); + bvol = bd_object(obj); + + while (true) { + rq = _blockdev_peek_request(q); + if (rq 
== NULL) + break; + + _blockdev_start_request(rq); + + if (!_blockdev_can_handle_rq(rq)) { + __blockdev_end_request_all(rq, -EIO); + continue; + } + + spin_unlock_irq(q->queue_lock); + + result = _blkdev_handle_request(rq, core); + + spin_lock_irq(q->queue_lock); + + if (result) + __blockdev_end_request_all(rq, result); + + atomic64_dec(&bvol->pending_rqs); + } +} + +static struct casdsk_exp_obj_ops _blockdev_exp_obj_ops = { + .prepare_queue = _blockdev_prepare_queue, + .set_geometry = _blockdev_set_geometry, + .make_request_fn = _blockdev_make_request_fast, + .request_fn = _blockdev_request_fn, + .prep_rq_fn = _blockdev_prep_rq_fn, +}; + +/** + * @brief this routine actually adds /dev/casM-N inode + */ +int block_dev_activate_exported_object(ocf_core_t core) +{ + int ret; + ocf_volume_t obj = ocf_core_get_volume(core); + struct bd_object *bvol = bd_object(obj); + + if (!cas_upgrade_is_in_upgrade()) { + ret = casdisk_functions.casdsk_exp_obj_activate(bvol->dsk); + if (-EEXIST == ret) + return KCAS_ERR_FILE_EXISTS; + } else { + ret = casdisk_functions.casdsk_disk_attach(bvol->dsk, THIS_MODULE, + &_blockdev_exp_obj_ops); + } + return ret; +} + +int block_dev_create_exported_object(ocf_core_t core) +{ + ocf_volume_t obj = ocf_core_get_volume(core); + ocf_cache_t cache = ocf_core_get_cache(core); + struct bd_object *bvol = bd_object(obj); + const struct ocf_volume_uuid *uuid = ocf_volume_get_uuid(obj); + char dev_name[DISK_NAME_LEN]; + struct casdsk_disk *dsk; + int result; + + snprintf(dev_name, DISK_NAME_LEN, "cas%d-%d", + ocf_cache_get_id(cache), + ocf_core_get_id(core)); + + dsk = casdisk_functions.casdsk_disk_claim(uuid->data, core); + if (dsk != bvol->dsk) + return -KCAS_ERR_SYSTEM; + + if (cas_upgrade_is_in_upgrade()) { + bvol->expobj_valid = true; + return 0; + } + + result = casdisk_functions.casdsk_exp_obj_create(dsk, dev_name, + THIS_MODULE, &_blockdev_exp_obj_ops); + if (!result) + bvol->expobj_valid = true; + + return result; +} + +int 
block_dev_destroy_exported_object(ocf_core_t core) +{ + int ret = 0; + ocf_volume_t obj = ocf_core_get_volume(core); + struct bd_object *bvol = bd_object(obj); + + if (!bvol->expobj_valid) + return 0; + + ret = casdisk_functions.casdsk_exp_obj_lock(bvol->dsk); + if (ret) { + if (-EBUSY == ret) + ret = -KCAS_ERR_DEV_PENDING; + return ret; + } + + ret = casdisk_functions.casdsk_exp_obj_destroy(bvol->dsk); + casdisk_functions.casdsk_exp_obj_unlock(bvol->dsk); + + if (!ret) + bvol->expobj_valid = false; + + return ret; +} + +static int _block_dev_lock_exported_object(ocf_core_t core, void *cntx) +{ + int result; + struct bd_object *bvol = bd_object( + ocf_core_get_volume(core)); + + result = casdisk_functions.casdsk_exp_obj_lock(bvol->dsk); + + if (-EBUSY == result) { + printk(KERN_WARNING "Stopping %s failed - device in use\n", + casdisk_functions.casdsk_exp_obj_get_gendisk(bvol->dsk)->disk_name); + return -KCAS_ERR_DEV_PENDING; + } else if (result) { + printk(KERN_WARNING "Stopping %s failed - device unavailable\n", + casdisk_functions.casdsk_exp_obj_get_gendisk(bvol->dsk)->disk_name); + return -OCF_ERR_CORE_NOT_AVAIL; + } + + bvol->expobj_locked = true; + return 0; +} + +static int _block_dev_unlock_exported_object(ocf_core_t core, void *cntx) +{ + struct bd_object *bvol = bd_object( + ocf_core_get_volume(core)); + + if (bvol->expobj_locked) { + casdisk_functions.casdsk_exp_obj_unlock(bvol->dsk); + bvol->expobj_locked = false; + } + + return 0; +} + +static int _block_dev_stop_exported_object(ocf_core_t core, void *cntx) +{ + struct bd_object *bvol = bd_object( + ocf_core_get_volume(core)); + + if (bvol->expobj_valid) { + BUG_ON(!bvol->expobj_locked); + + printk(KERN_INFO "Stopping device %s\n", + casdisk_functions.casdsk_exp_obj_get_gendisk(bvol->dsk)->disk_name); + + casdisk_functions.casdsk_exp_obj_destroy(bvol->dsk); + bvol->expobj_valid = false; + } + + if (bvol->expobj_locked) { + casdisk_functions.casdsk_exp_obj_unlock(bvol->dsk); + bvol->expobj_locked = 
false; + } + + return 0; +} + +int block_dev_destroy_all_exported_objects(ocf_cache_t cache) +{ + int result; + + /* Try lock exported objects */ + result = ocf_core_visit(cache, _block_dev_lock_exported_object, NULL, + true); + if (result) { + /* Failure, unlock already locked exported objects */ + ocf_core_visit(cache, _block_dev_unlock_exported_object, NULL, + true); + return result; + } + + ocf_core_visit(cache, _block_dev_stop_exported_object, NULL, true); + + return 0; +} diff --git a/modules/cas_cache/volume/vol_block_dev_top.h b/modules/cas_cache/volume/vol_block_dev_top.h new file mode 100644 index 000000000..aef7b304a --- /dev/null +++ b/modules/cas_cache/volume/vol_block_dev_top.h @@ -0,0 +1,17 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __VOL_BLOCK_DEV_TOP_H__ +#define __VOL_BLOCK_DEV_TOP_H__ + +int block_dev_activate_exported_object(ocf_core_t core); + +int block_dev_create_exported_object(ocf_core_t core); + +int block_dev_destroy_exported_object(ocf_core_t core); + +int block_dev_destroy_all_exported_objects(ocf_cache_t cache); + +#endif /* __VOL_BLOCK_DEV_TOP_H__ */ diff --git a/modules/cas_cache/volume/vol_block_dev_top.o.ur-safe b/modules/cas_cache/volume/vol_block_dev_top.o.ur-safe new file mode 100644 index 000000000..24724ca83 --- /dev/null +++ b/modules/cas_cache/volume/vol_block_dev_top.o.ur-safe @@ -0,0 +1,2 @@ +/home/robert/work/cas/ICAS_Linux/modules/cas_cache/volume/vol_block_dev_top.o-.text-9bd +/home/robert/work/cas/ICAS_Linux/modules/cas_cache/volume/vol_block_dev_top.o-.text-9c4 diff --git a/modules/cas_disk/Makefile b/modules/cas_disk/Makefile new file mode 100644 index 000000000..66e75239c --- /dev/null +++ b/modules/cas_disk/Makefile @@ -0,0 +1,12 @@ +# +# Copyright(c) 2012-2019 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# +include $(M)/config.mk + +obj-m := cas_disk.o + +cas_disk-objs = main.o +cas_disk-objs += disk.o +cas_disk-objs += 
exp_obj.o +cas_disk-objs += sysfs.o diff --git a/modules/cas_disk/cas_disk.h b/modules/cas_disk/cas_disk.h new file mode 100644 index 000000000..bd4d0ae2e --- /dev/null +++ b/modules/cas_disk/cas_disk.h @@ -0,0 +1,253 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ +#ifndef __CASDISK_H__ +#define __CASDISK_H__ + +#include + +/** + * Version of cas_disk interface + */ +#define CASDSK_IFACE_VERSION 2 + +struct casdsk_disk; + +#define CASDSK_BIO_NOT_HANDLED 0 +#define CASDSK_BIO_HANDLED 1 + +struct casdsk_exp_obj_ops { + + /** + * @brief Prepare request queue of exported object (top) block device. + * Could be NULL. + */ + int (*prepare_queue)(struct casdsk_disk *dsk, struct request_queue *q, + void *private); + + /** + * @brief Cleanup request queue of exported object (top) block device. + * Could be NULL. + */ + void (*cleanup_queue)(struct casdsk_disk *dsk, struct request_queue *q, + void *private); + + /** + * @brief Set geometry of exported object (top) block device. + * Could be NULL. + */ + int (*set_geometry)(struct casdsk_disk *dsk, void *private); + + /** + * @brief make_request_fn of exported object (top) block device. + * Called by cas_disk when cas_disk device is in attached mode. + * + * @return casdsk_BIO_HANDLED when bio was handled. + * Otherwise casdsk_BIO_NOT_HANDLED. In this case bio will be submitted + * to I/O scheduler and should be handled by request_fn. + */ + int (*make_request_fn)(struct casdsk_disk *dsk, struct request_queue *q, + struct bio *bio, void *private); + + /** + * @brief request_fn of exported object (top) block device. + * Called by cas_disk when cas_disk device is in attached mode. + */ + void (*request_fn)(struct casdsk_disk *dsk, struct request_queue *q, + void *private); + + /** + * @brief prep_rq_fn of exported object (top) block device. + * Called by cas_disk when cas_disk device is in attached mode. 
+ */ + int (*prep_rq_fn)(struct casdsk_disk *dsk, struct request_queue *q, + struct request *rq, void *private); + + /** + * @brief ioctl handler of exported object (top) block device. + * Called by cas_disk when cas_disk device is in attached mode. + */ + int (*ioctl)(struct casdsk_disk *dsk, unsigned int cmd, unsigned long arg, + void *private); +}; + +/** + * Stored configuration buffer description + */ +struct casdsk_props_conf { + void *buffer; + size_t size; + uint16_t crc; +}; + +/** + * @brief Get version of cas_disk interface + * @return cas_disk interface version + */ +uint32_t casdsk_get_version(void); + +/** + * @brief Store configuration buffers in cas_disk + * @param n_blobs Number of configuration buffers + * @param blobs Array of configuration buffers structures + */ +void casdsk_store_config(size_t n_blobs, struct casdsk_props_conf *blobs); + +/** + * @brief Get previously stored configuration buffers + * @param blobs Where to store pointer to configuration buffers array + * @return Number of stored configuration buffers + */ +size_t casdsk_get_stored_config(struct casdsk_props_conf **blobs); + +/** + * @brief Free resources related to stored configuration buffers + */ +void casdsk_free_stored_config(void); + +/** + * @brief Open block device + * @param path Path to block device + * @param private Private data + * @return Pointer to casdsk_disk related to opened block device + */ +struct casdsk_disk *casdsk_disk_open(const char *path, void *private); + +/** + * @brief Claim previously opened block device (holded by cas_disk) + * @param path Path to block device + * @param private Private data + * @return Pointer to casdsk_disk structure related to block device, or NULL + * if device is not opened by cas_disk. + */ +struct casdsk_disk *casdsk_disk_claim(const char *path, void *private); + +/** + * @brief Close block device and remove from cas_disk + * @param dsk Pointer to casdsk_disk structure related to block device + * which should be closed. 
+ */ +void casdsk_disk_close(struct casdsk_disk *dsk); + +/** + * @brief Get block_device structure of bottom block device + * @param dsk Pointer to casdsk_disk structure related to cas_disk device + * @return Pointer to block_device structure of bottom block device + */ +struct block_device *casdsk_disk_get_blkdev(struct casdsk_disk *dsk); + +/** + * @brief Get request queue of bottom block device + * @param dsk Pointer to casdsk_disk structure related to cas_disk device + * @return Pointer to reqest_queue structure of bottom block device + */ +struct request_queue *casdsk_disk_get_queue(struct casdsk_disk *dsk); + +/** + * @brief Get gendisk structure of bottom block device + * @param dsk Pointer to casdsk_disk structure related to cas_disk device + * @return Pointer to gendisk structure of bottom block device + */ +struct gendisk *casdsk_disk_get_gendisk(struct casdsk_disk *dsk); + +/** + * @brief Prepare cas_disk device to switch to pass-through mode + * @param dsk Pointer to casdsk_disk structure related to cas_disk device + * @return 0 if success, errno if failure + */ +int casdsk_disk_set_pt(struct casdsk_disk *dsk); + +/** + * @brief Prepare cas_disk device to switch to attached mode + * @param dsk Pointer to casdsk_disk structure related to cas_disk device + * @return 0 if success, errno if failure + */ +int casdsk_disk_set_attached(struct casdsk_disk *dsk); + +/** + * @brief Revert cas_disk device back to attached mode + * @param dsk Pointer to casdsk_disk structure related to cas_disk device + * @return 0 if success, errno if failure + */ +int casdsk_disk_clear_pt(struct casdsk_disk *dsk); + +/** + * @brief Dettach cas from cas_disk device + * @param dsk Pointer to casdsk_disk structure related to cas_disk device + * @return 0 if success, errno if failure + */ +int casdsk_disk_dettach(struct casdsk_disk *dsk); + +/** + * @brief Attach cas to cas_disk device + * @param dsk Pointer to casdsk_disk structure related to cas_disk device + * @param owner 
Pointer to cas module + * @param ops Pointer to structure with callback functions + * @return 0 if success, errno if failure + */ +int casdsk_disk_attach(struct casdsk_disk *dsk, struct module *owner, + struct casdsk_exp_obj_ops *ops); + +/** + * @brief Create exported object (top device) + * @param dsk Pointer to casdsk_disk structure related to cas_disk device + * @param dev_name Name of exported object (top device) + * @param owner Pointer to cas module + * @param ops Pointer to structure with callback functions + * @return 0 if success, errno if failure + */ +int casdsk_exp_obj_create(struct casdsk_disk *dsk, const char *dev_name, + struct module *owner, struct casdsk_exp_obj_ops *ops); + +/** + * @brief Get request queue of exported object (top) block device + * @param dsk Pointer to casdsk_disk structure related to cas_disk device + * @return Pointer to reqest_queue structure of top block device + */ +struct request_queue *casdsk_exp_obj_get_queue(struct casdsk_disk *dsk); + +/** + * @brief Get gendisk structure of exported object (top) block device + * @param dsk Pointer to casdsk_disk structure related to cas_disk device + * @return Pointer to gendisk structure of top block device + */ +struct gendisk *casdsk_exp_obj_get_gendisk(struct casdsk_disk *dsk); + +/** + * @brief Activate exported object (make it visible to OS + * and allow I/O handling) + * @param dsk Pointer to casdsk_disk structure related to cas_disk device + * @return 0 if success, errno if failure + */ +int casdsk_exp_obj_activate(struct casdsk_disk *dsk); + +/** + * @brief Check if exported object is active + * @param dsk Pointer to casdsk_disk structure related to cas_disk device + * @return true if exported object is active + */ +bool casdsk_exp_obj_activated(struct casdsk_disk *ds); + +/** + * @brief Lock exported object + * @param dsk Pointer to casdsk_disk structure related to cas_disk device + * @return 0 if success, errno if failure + */ +int casdsk_exp_obj_lock(struct casdsk_disk 
*dsk); + +/** + * @brief Unlock exported object + * @param dsk Pointer to casdsk_disk structure related to cas_disk device + * @return 0 if success, errno if failure + */ +int casdsk_exp_obj_unlock(struct casdsk_disk *dsk); + +/** + * @brief Destroy exported object + * @param dsk Pointer to casdsk_disk structure related to cas_disk device + * @return 0 if success, errno if failure + */ +int casdsk_exp_obj_destroy(struct casdsk_disk *dsk); + +#endif diff --git a/modules/cas_disk/cas_disk_defs.h b/modules/cas_disk/cas_disk_defs.h new file mode 100644 index 000000000..0cafc3d5f --- /dev/null +++ b/modules/cas_disk/cas_disk_defs.h @@ -0,0 +1,93 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ +#ifndef __CASDISK_DEFS_H__ +#define __CASDISK_DEFS_H__ + +#include +#include +#include +#include +#include +#include + +struct casdsk_stored_config { + size_t n_blobs; + struct casdsk_props_conf *blobs; +}; + +struct casdsk_module { + struct mutex lock; + + struct list_head disk_list; + uint32_t next_disk_id; + int disk_major; + int next_minor; + + struct kmem_cache *disk_cache; + struct kmem_cache *exp_obj_cache; + struct kmem_cache *pt_io_ctx_cache; + struct kmem_cache *pending_rqs_cache; + + struct kobject kobj; + + struct casdsk_stored_config config; +}; + +extern struct casdsk_module *casdsk_module; + +/* prefixes for messages */ +#define CASDSK_LOGO "CAS Disk" +#define CASDSK_PREFIX_SHORT "[" CASDSK_LOGO "] " +#define CASDSK_PREFIX_LONG "Cache Acceleration Software Linux" + +#define CASDSK_KERN_EMERG KERN_EMERG""CASDSK_PREFIX_SHORT +#define CASDSK_KERN_ALERT KERN_ALERT""CASDSK_PREFIX_SHORT +#define CASDSK_KERN_CRIT KERN_CRIT""CASDSK_PREFIX_SHORT +#define CASDSK_KERN_ERR KERN_ERR""CASDSK_PREFIX_SHORT +#define CASDSK_KERN_WARNING KERN_WARNING""CASDSK_PREFIX_SHORT +#define CASDSK_KERN_NOTICE KERN_NOTICE""CASDSK_PREFIX_SHORT +#define CASDSK_KERN_INFO KERN_INFO""CASDSK_PREFIX_SHORT +#define CASDSK_KERN_DEBUG 
KERN_DEBUG""CASDSK_PREFIX_SHORT + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 37) +static inline struct block_device *open_bdev_exclusive(const char *path, + fmode_t mode, + void *holder) +{ + return blkdev_get_by_path(path, mode | FMODE_EXCL, holder); +} + +static inline void close_bdev_exclusive(struct block_device *bdev, fmode_t mode) +{ + blkdev_put(bdev, mode | FMODE_EXCL); +} + +static inline int bd_claim_by_disk(struct block_device *bdev, void *holder, + struct gendisk *disk) +{ + return bd_link_disk_holder(bdev, disk); +} + +static inline void bd_release_from_disk(struct block_device *bdev, + struct gendisk *disk) +{ + return bd_unlink_disk_holder(bdev, disk); +} +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0) + #define KRETURN(x) ({ return (x); }) + #define MAKE_RQ_RET_TYPE blk_qc_t +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0) + #define KRETURN(x) return + #define MAKE_RQ_RET_TYPE void +#else + #define KRETURN(x) ({ return (x); }) + #define MAKE_RQ_RET_TYPE int +#endif + +#include "debug.h" + +#endif diff --git a/modules/cas_disk/debug.h b/modules/cas_disk/debug.h new file mode 100644 index 000000000..2357d3078 --- /dev/null +++ b/modules/cas_disk/debug.h @@ -0,0 +1,45 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ +#ifndef __CASDISK_DEBUG_H__ +#define __CASDISK_DEBUG_H__ + +#undef CASDSK_DEBUG + +#ifdef CASDSK_DEBUG +#define CASDSK_DEBUG_TRACE() \ + printk(CASDSK_KERN_INFO "%s\n", __func__) + +#define CASDSK_DEBUG_DISK_TRACE(dsk) \ + printk(CASDSK_KERN_INFO "[%u] %s\n", dsk->id, __func__) + +#define CASDSK_DEBUG_MSG(msg) \ + printk(CASDSK_KERN_INFO "%s - %s\n", __func__, msg) + +#define CASDSK_DEBUG_PARAM(format, ...) \ + printk(CASDSK_KERN_INFO "%s - "format"\n", \ + __func__, ##__VA_ARGS__) + +#define CASDSK_DEBUG_DISK(dsk, format, ...) \ + printk(CASDSK_KERN_INFO "[%u] %s - "format"\n", \ + dsk->id, \ + __func__, ##__VA_ARGS__) + +#define CASDSK_DEBUG_ERROR(error, ...) 
\ + CASDSK_DEBUG_PARAM("ERROR(%d) "error, __LINE__, ##__VA_ARGS__) + +#define CASDSK_DEBUG_DISK_ERROR(dsk, error, ...) \ + CASDSK_DEBUG_DISK(dsk, "ERROR(%d) "error, __LINE__, ##__VA_ARGS__) + +#else +#define CASDSK_DEBUG_TRACE() +#define CASDSK_DEBUG_DISK_TRACE(dsk) +#define CASDSK_DEBUG_MSG(msg) +#define CASDSK_DEBUG_PARAM(format, ...) +#define CASDSK_DEBUG_DISK(dsk, format, ...) +#define CASDSK_DEBUG_ERROR(error, ...) +#define CASDSK_DEBUG_DISK_ERROR(dsk, error, ...) +#endif + +#endif diff --git a/modules/cas_disk/disk.c b/modules/cas_disk/disk.c new file mode 100644 index 000000000..9a9e836b1 --- /dev/null +++ b/modules/cas_disk/disk.c @@ -0,0 +1,452 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ +#include +#include +#include +#include +#include "cas_disk_defs.h" +#include "cas_cache.h" +#include "disk.h" +#include "exp_obj.h" +#include "sysfs.h" + +#define CASDSK_DISK_OPEN_FMODE (FMODE_READ | FMODE_WRITE) + +static const char * const _casdsk_disk_modes[] = { + [CASDSK_MODE_UNKNOWN] = "unknown", + [CASDSK_MODE_PT] = "pass-through", + [CASDSK_MODE_ATTACHED] = "attached", + [CASDSK_MODE_TRANS_TO_PT] = "attached -> pass-through", + [CASDSK_MODE_TRANS_TO_ATTACHED] = "pass-through -> attached" +}; + +static void _casdsk_disk_release(struct kobject *kobj) +{ + struct casdsk_disk *dsk; + + BUG_ON(!kobj); + + dsk = casdsk_kobj_to_disk(kobj); + BUG_ON(!dsk); + + CASDSK_DEBUG_DISK_TRACE(dsk); + + kfree(dsk->path); + + kmem_cache_free(casdsk_module->disk_cache, dsk); +} + +static ssize_t _casdsk_disk_mode_show(struct kobject *kobj, char *page) +{ + struct casdsk_disk *dsk = casdsk_kobj_to_disk(kobj); + + CASDSK_DEBUG_DISK_TRACE(dsk); + + return scnprintf(page, PAGE_SIZE, "%s", + _casdsk_disk_modes[atomic_read(&dsk->mode)]); +} + +static struct casdsk_attribute _casdsk_disk_mode_attr = + __ATTR(mode, S_IRUGO, _casdsk_disk_mode_show, NULL); + +static struct attribute *_casdsk_disk_attrs[] = { + 
&_casdsk_disk_mode_attr.attr, + NULL +}; + +static struct kobj_type casdsk_disk_ktype = { + .release = _casdsk_disk_release, + .sysfs_ops = &casdsk_sysfs_ops, + .default_attrs = _casdsk_disk_attrs +}; + +int __init casdsk_init_disks(void) +{ + CASDSK_DEBUG_TRACE(); + + casdsk_module->next_disk_id = 1; + INIT_LIST_HEAD(&casdsk_module->disk_list); + + casdsk_module->disk_major = register_blkdev(casdsk_module->disk_major, + "cas"); + if (casdsk_module->disk_major <= 0) { + CASDSK_DEBUG_ERROR("Cannot allocate major number"); + return -EINVAL; + } + CASDSK_DEBUG_PARAM("Allocated major number: %d", casdsk_module->disk_major); + + casdsk_module->disk_cache = + kmem_cache_create("casdsk_disk", sizeof(struct casdsk_disk), + 0, 0, NULL); + if (!casdsk_module->disk_cache) { + unregister_blkdev(casdsk_module->disk_major, "cas"); + return -ENOMEM; + } + + return 0; +} + +void casdsk_deinit_disks(void) +{ + CASDSK_DEBUG_TRACE(); + + kmem_cache_destroy(casdsk_module->disk_cache); + unregister_blkdev(casdsk_module->disk_major, "cas"); +} + +static int _casdsk_disk_init_kobject(struct casdsk_disk *dsk) +{ + int result = 0; + + kobject_init(&dsk->kobj, &casdsk_disk_ktype); + result = kobject_add(&dsk->kobj, &disk_to_dev(dsk->bd->bd_disk)->kobj, + "cas%d", dsk->id); + if (result) + CASDSK_DEBUG_DISK_ERROR(dsk, "Cannot register kobject"); + + return result; +} + +struct casdsk_disk *casdsk_disk_open(const char *path, void *private) +{ + struct casdsk_disk *dsk; + int result = 0; + + BUG_ON(!path); + + CASDSK_DEBUG_TRACE(); + + dsk = kmem_cache_zalloc(casdsk_module->disk_cache, GFP_KERNEL); + if (!dsk) { + CASDSK_DEBUG_ERROR("Cannot allocate memory"); + result = -ENOMEM; + goto error_kmem; + } + mutex_init(&dsk->lock); + + dsk->path = kstrdup(path, GFP_KERNEL); + if (!dsk->path) { + result = -ENOMEM; + goto error_kstrdup; + } + + atomic_set(&dsk->mode, CASDSK_MODE_UNKNOWN); + + dsk->bd = open_bdev_exclusive(path, CASDSK_DISK_OPEN_FMODE, dsk); + if (IS_ERR(dsk->bd)) { + 
CASDSK_DEBUG_ERROR("Cannot open exclusive"); + result = PTR_ERR(dsk->bd); + goto error_open_bdev; + } + + dsk->private = private; + + mutex_lock(&casdsk_module->lock); + + dsk->id = casdsk_module->next_disk_id++; + list_add(&dsk->list, &casdsk_module->disk_list); + + mutex_unlock(&casdsk_module->lock); + + result = _casdsk_disk_init_kobject(dsk); + if (result) + goto error_kobject; + + CASDSK_DEBUG_DISK(dsk, "Created (%p)", dsk); + + return dsk; + +error_kobject: + mutex_lock(&casdsk_module->lock); + list_del(&dsk->list); + mutex_unlock(&casdsk_module->lock); + close_bdev_exclusive(dsk->bd, CASDSK_DISK_OPEN_FMODE); +error_open_bdev: + kfree(dsk->path); +error_kstrdup: + kmem_cache_free(casdsk_module->disk_cache, dsk); +error_kmem: + return ERR_PTR(result); +} +EXPORT_SYMBOL(casdsk_disk_open); + +static void _casdsk_disk_claim(struct casdsk_disk *dsk, void *private) +{ + dsk->private = private; +} + +struct casdsk_disk *casdsk_disk_claim(const char *path, void *private) +{ + struct list_head *item; + struct casdsk_disk *dsk = NULL; + + BUG_ON(!path); + + mutex_lock(&casdsk_module->lock); + list_for_each(item, &casdsk_module->disk_list) { + dsk = list_entry(item, struct casdsk_disk, list); + if (strncmp(path, dsk->path, PATH_MAX) == 0) { + _casdsk_disk_claim(dsk, private); + mutex_unlock(&casdsk_module->lock); + return dsk; + } + } + mutex_unlock(&casdsk_module->lock); + return NULL; +} +EXPORT_SYMBOL(casdsk_disk_claim); + +static void __casdsk_disk_close(struct casdsk_disk *dsk) +{ + close_bdev_exclusive(dsk->bd, CASDSK_DISK_OPEN_FMODE); + + casdsk_exp_obj_free(dsk); + kobject_put(&dsk->kobj); +} + +void casdsk_disk_close(struct casdsk_disk *dsk) +{ + BUG_ON(!dsk); + BUG_ON(!dsk->bd); + + CASDSK_DEBUG_DISK(dsk, "Destroying (%p)", dsk); + + mutex_lock(&casdsk_module->lock); + + list_del(&dsk->list); + + mutex_unlock(&casdsk_module->lock); + + __casdsk_disk_close(dsk); +} +EXPORT_SYMBOL(casdsk_disk_close); + +void __exit casdsk_disk_shutdown_all(void) +{ + struct 
list_head *item, *n; + struct casdsk_disk *dsk; + + CASDSK_DEBUG_TRACE(); + + mutex_lock(&casdsk_module->lock); + + list_for_each_safe(item, n, &casdsk_module->disk_list) { + dsk = list_entry(item, struct casdsk_disk, list); + + list_del(item); + + casdsk_disk_lock(dsk); + + BUG_ON(!casdsk_disk_is_pt(dsk) && !casdsk_disk_is_unknown(dsk)); + + if (casdsk_disk_is_pt(dsk)) { + atomic_set(&dsk->mode, CASDSK_MODE_TRANS_TO_SHUTDOWN); + casdsk_exp_obj_prepare_shutdown(dsk); + } + + atomic_set(&dsk->mode, CASDSK_MODE_SHUTDOWN); + + if (dsk->exp_obj) { + casdsk_exp_obj_lock(dsk); + casdsk_exp_obj_destroy(dsk); + casdsk_exp_obj_unlock(dsk); + } + + casdsk_disk_unlock(dsk); + __casdsk_disk_close(dsk); + + } + + mutex_unlock(&casdsk_module->lock); +} + +struct block_device *casdsk_disk_get_blkdev(struct casdsk_disk *dsk) +{ + BUG_ON(!dsk); + return dsk->bd; +} +EXPORT_SYMBOL(casdsk_disk_get_blkdev); + +struct gendisk *casdsk_disk_get_gendisk(struct casdsk_disk *dsk) +{ + BUG_ON(!dsk); + BUG_ON(!dsk->bd); + return dsk->bd->bd_disk; +} +EXPORT_SYMBOL(casdsk_disk_get_gendisk); + +struct request_queue *casdsk_disk_get_queue(struct casdsk_disk *dsk) +{ + BUG_ON(!dsk); + BUG_ON(!dsk->bd); + BUG_ON(!dsk->bd->bd_contains); + BUG_ON(!dsk->bd->bd_contains->bd_disk); + return dsk->bd->bd_contains->bd_disk->queue; +} +EXPORT_SYMBOL(casdsk_disk_get_queue); + +int casdsk_disk_allocate_minors(int count) +{ + int minor = -1; + + mutex_lock(&casdsk_module->lock); + if (casdsk_module->next_minor + count <= (1 << MINORBITS)) { + minor = casdsk_module->next_minor; + casdsk_module->next_minor += count; + } + mutex_unlock(&casdsk_module->lock); + + return minor; +} + +static inline int __casdsk_disk_set_pt(struct casdsk_disk *dsk) +{ + BUG_ON(!dsk); + atomic_set(&dsk->mode, CASDSK_MODE_TRANS_TO_PT); + casdsk_exp_obj_prepare_pt(dsk); + return 0; +} + +int casdsk_disk_set_pt(struct casdsk_disk *dsk) +{ + int result; + + CASDSK_DEBUG_DISK_TRACE(dsk); + + if (!dsk->exp_obj) + return 0; + + 
casdsk_disk_lock(dsk); + result = __casdsk_disk_set_pt(dsk); + casdsk_disk_unlock(dsk); + + return result; +} +EXPORT_SYMBOL(casdsk_disk_set_pt); + +static inline int __casdsk_disk_set_attached(struct casdsk_disk *dsk) +{ + atomic_set(&dsk->mode, CASDSK_MODE_TRANS_TO_ATTACHED); + casdsk_exp_obj_prepare_attached(dsk); + + return 0; +} + +int casdsk_disk_set_attached(struct casdsk_disk *dsk) +{ + int result; + + BUG_ON(!dsk); + CASDSK_DEBUG_DISK_TRACE(dsk); + + if (!dsk->exp_obj) + return 0; + + casdsk_disk_lock(dsk); + result = __casdsk_disk_set_attached(dsk); + casdsk_disk_unlock(dsk); + + return result; +} +EXPORT_SYMBOL(casdsk_disk_set_attached); + +static inline int __casdsk_disk_clear_pt(struct casdsk_disk *dsk) +{ + BUG_ON(atomic_read(&dsk->mode) != CASDSK_MODE_TRANS_TO_PT); + atomic_set(&dsk->mode, CASDSK_MODE_ATTACHED); + return 0; +} + +int casdsk_disk_clear_pt(struct casdsk_disk *dsk) +{ + int result; + + BUG_ON(!dsk); + CASDSK_DEBUG_DISK_TRACE(dsk); + + if (!dsk->exp_obj) + return 0; + + casdsk_disk_lock(dsk); + result = __casdsk_disk_clear_pt(dsk); + casdsk_disk_unlock(dsk); + + return result; +} +EXPORT_SYMBOL(casdsk_disk_clear_pt); + +static inline int __casdsk_disk_dettach(struct casdsk_disk *dsk) +{ + int result; + + BUG_ON(atomic_read(&dsk->mode) != CASDSK_MODE_TRANS_TO_PT); + + atomic_set(&dsk->mode, CASDSK_MODE_PT); + + result = casdsk_exp_obj_dettach(dsk); + if (result) { + atomic_set(&dsk->mode, CASDSK_MODE_ATTACHED); + return result; + } + + return 0; +} + +int casdsk_disk_dettach(struct casdsk_disk *dsk) +{ + int result; + + BUG_ON(!dsk); + CASDSK_DEBUG_DISK_TRACE(dsk); + + if (!dsk->exp_obj) + return 0; + + casdsk_disk_lock(dsk); + result = __casdsk_disk_dettach(dsk); + casdsk_disk_unlock(dsk); + + return result; + +} +EXPORT_SYMBOL(casdsk_disk_dettach); + +static inline int __casdsk_disk_attach(struct casdsk_disk *dsk, + struct module *owner, struct casdsk_exp_obj_ops *ops) +{ + int result; + + BUG_ON(!ops); + BUG_ON(atomic_read(&dsk->mode) 
!= CASDSK_MODE_TRANS_TO_ATTACHED); + + result = casdsk_exp_obj_attach(dsk, owner, ops); + if (result) { + atomic_set(&dsk->mode, CASDSK_MODE_PT); + return result; + } + + atomic_set(&dsk->mode, CASDSK_MODE_ATTACHED); + + return 0; +} + +int casdsk_disk_attach(struct casdsk_disk *dsk, struct module *owner, + struct casdsk_exp_obj_ops *ops) +{ + int result; + + CASDSK_DEBUG_DISK_TRACE(dsk); + + if (!dsk->exp_obj) + return 0; + + casdsk_disk_lock(dsk); + result = __casdsk_disk_attach(dsk, owner, ops); + casdsk_disk_unlock(dsk); + + return result; + +} +EXPORT_SYMBOL(casdsk_disk_attach); diff --git a/modules/cas_disk/disk.h b/modules/cas_disk/disk.h new file mode 100644 index 000000000..8a8d95071 --- /dev/null +++ b/modules/cas_disk/disk.h @@ -0,0 +1,96 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ +#ifndef __CASDISK_DISK_H__ +#define __CASDISK_DISK_H__ + +#include +#include +#include +#include + +struct casdsk_exp_obj; + +#define CASDSK_MODE_UNKNOWN 0 +#define CASDSK_MODE_PT (1 << 0) +#define CASDSK_MODE_ATTACHED (1 << 1) +#define CASDSK_MODE_SHUTDOWN (1 << 2) +#define CASDSK_MODE_TRANSITION (1 << 3) +#define CASDSK_MODE_TRANS_TO_ATTACHED (CASDSK_MODE_PT | CASDSK_MODE_TRANSITION) +#define CASDSK_MODE_TRANS_TO_PT (CASDSK_MODE_ATTACHED | \ + CASDSK_MODE_TRANSITION) +#define CASDSK_MODE_TRANS_TO_SHUTDOWN (CASDSK_MODE_SHUTDOWN | \ + CASDSK_MODE_TRANSITION) + +struct casdsk_disk { + uint32_t id; + atomic_t mode; + char *path; + + struct mutex lock; + + struct block_device *bd; + + int gd_flags; + int gd_minors; + + struct casdsk_exp_obj *exp_obj; + + struct kobject kobj; + struct list_head list; + + void *private; +}; + +int __init casdsk_init_disks(void); +void casdsk_deinit_disks(void); + +void __exit casdsk_disk_shutdown_all(void); + +int casdsk_disk_allocate_minors(int count); + +static inline void casdsk_disk_lock(struct casdsk_disk *dsk) +{ + mutex_lock(&dsk->lock); +} + +static inline void casdsk_disk_unlock(struct casdsk_disk
*dsk) +{ + mutex_unlock(&dsk->lock); +} + +static inline struct casdsk_disk *casdsk_kobj_to_disk(struct kobject *kobj) +{ + return container_of(kobj, struct casdsk_disk, kobj); +} + +static inline bool casdsk_disk_in_transition(struct casdsk_disk *dsk) +{ + return (atomic_read(&dsk->mode) & CASDSK_MODE_TRANSITION) == + CASDSK_MODE_TRANSITION; +} + +static inline bool casdsk_disk_is_attached(struct casdsk_disk *dsk) +{ + return (atomic_read(&dsk->mode) & CASDSK_MODE_ATTACHED) == + CASDSK_MODE_ATTACHED; +} + +static inline bool casdsk_disk_is_pt(struct casdsk_disk *dsk) +{ + return (atomic_read(&dsk->mode) & CASDSK_MODE_PT) == CASDSK_MODE_PT; +} + +static inline bool casdsk_disk_is_shutdown(struct casdsk_disk *dsk) +{ + return (atomic_read(&dsk->mode) & CASDSK_MODE_SHUTDOWN) == + CASDSK_MODE_SHUTDOWN; +} + +static inline bool casdsk_disk_is_unknown(struct casdsk_disk *dsk) +{ + return atomic_read(&dsk->mode) == CASDSK_MODE_UNKNOWN; +} + +#endif diff --git a/modules/cas_disk/exp_obj.c b/modules/cas_disk/exp_obj.c new file mode 100644 index 000000000..aa599eba7 --- /dev/null +++ b/modules/cas_disk/exp_obj.c @@ -0,0 +1,842 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ +#include +#include +#include +#include +#include +#include + +#include "cas_disk_defs.h" +#include "cas_disk.h" +#include "disk.h" +#include "exp_obj.h" +#include "linux_kernel_version.h" + +#define CASDSK_DEV_MINORS 16 +#define KMEM_CACHE_MIN_SIZE sizeof(void *) + +int __init casdsk_init_exp_objs(void) +{ + int ncpus; + + CASDSK_DEBUG_TRACE(); + + casdsk_module->exp_obj_cache = kmem_cache_create("casdsk_exp_obj", + sizeof(struct casdsk_exp_obj), 0, 0, NULL); + if (!casdsk_module->exp_obj_cache) + goto error_exp_obj_cache; + + ncpus = num_online_cpus(); + + casdsk_module->pending_rqs_cache = + kmem_cache_create("casdsk_exp_obj_pending_rqs", + ((sizeof(atomic_t) * ncpus) < KMEM_CACHE_MIN_SIZE) ? 
+ KMEM_CACHE_MIN_SIZE : (sizeof(atomic_t) * ncpus), + 0, 0, NULL); + if (!casdsk_module->pending_rqs_cache) + goto error_pending_rqs_cache; + + casdsk_module->pt_io_ctx_cache = + kmem_cache_create("casdsk_exp_obj_pt_io_ctx", + sizeof(struct casdsk_exp_obj_pt_io_ctx), + 0, 0, NULL); + if (!casdsk_module->pt_io_ctx_cache) + goto error_pt_io_ctx_cache; + + return 0; + +error_pt_io_ctx_cache: + kmem_cache_destroy(casdsk_module->pending_rqs_cache); +error_pending_rqs_cache: + kmem_cache_destroy(casdsk_module->exp_obj_cache); +error_exp_obj_cache: + return -ENOMEM; +} + +void casdsk_deinit_exp_objs(void) +{ + CASDSK_DEBUG_TRACE(); + + kmem_cache_destroy(casdsk_module->pt_io_ctx_cache); + kmem_cache_destroy(casdsk_module->pending_rqs_cache); + kmem_cache_destroy(casdsk_module->exp_obj_cache); +} + +static int _casdsk_exp_obj_prep_rq_fn(struct request_queue *q, struct request *rq) +{ + struct casdsk_disk *dsk; + + BUG_ON(!q); + BUG_ON(!q->queuedata); + dsk = q->queuedata; + BUG_ON(!dsk->exp_obj); + + if (likely(dsk->exp_obj->ops && dsk->exp_obj->ops->prep_rq_fn)) + return dsk->exp_obj->ops->prep_rq_fn(dsk, q, rq, dsk->private); + else + return BLKPREP_OK; +} + +static void _casdsk_exp_obj_request_fn(struct request_queue *q) +{ + struct casdsk_disk *dsk; + struct request *rq; + + BUG_ON(!q); + BUG_ON(!q->queuedata); + dsk = q->queuedata; + BUG_ON(!dsk); + BUG_ON(!dsk->exp_obj); + + if (likely(dsk->exp_obj->ops && dsk->exp_obj->ops->request_fn)) { + dsk->exp_obj->ops->request_fn(dsk, q, dsk->private); + } else { + /* + * request_fn() is required, as we can't do any default + * action in attached mode. In PT mode we handle all bios + * directly in make_request_fn(), so request_fn() will not + * be called.
+ */ + + rq = blk_peek_request(q); + BUG_ON(rq); + } +} + +static inline void _casdsk_exp_obj_handle_bio_att(struct casdsk_disk *dsk, + struct request_queue *q, + struct bio *bio) +{ + int status = CASDSK_BIO_NOT_HANDLED; + + if (likely(dsk->exp_obj->ops->make_request_fn)) + status = dsk->exp_obj->ops-> + make_request_fn(dsk, q, bio, dsk->private); + + if (status == CASDSK_BIO_NOT_HANDLED) + dsk->exp_obj->mk_rq_fn(q, bio); +} + +DECLARE_BLOCK_CALLBACK(_casdsk_exp_obj_bio_pt_io, struct bio *bio, + unsigned int bytes_done, int error) +{ + struct casdsk_exp_obj_pt_io_ctx *io; + + BUG_ON(!bio); + BLOCK_CALLBACK_INIT(bio); + + io = bio->bi_private; + BUG_ON(!io); + BIO_ENDIO(io->bio, BIO_BISIZE(io->bio), + BLOCK_CALLBACK_ERROR(bio, error)); + + if (atomic_dec_return(&io->dsk->exp_obj->pt_ios) < 0) + BUG(); + + bio_put(bio); + kmem_cache_free(casdsk_module->pt_io_ctx_cache, io); + BLOCK_CALLBACK_RETURN(); +} + +static inline void _casdsk_exp_obj_handle_bio_pt(struct casdsk_disk *dsk, + struct request_queue *q, + struct bio *bio) +{ + struct bio *cloned_bio; + struct casdsk_exp_obj_pt_io_ctx *io; + + io = kmem_cache_zalloc(casdsk_module->pt_io_ctx_cache, GFP_ATOMIC); + if (!io) { + BIO_ENDIO(bio, BIO_BISIZE(bio), -ENOMEM); + return; + } + + cloned_bio = cas_bio_clone(bio, GFP_ATOMIC); + if (!cloned_bio) { + kmem_cache_free(casdsk_module->pt_io_ctx_cache, io); + BIO_ENDIO(bio, BIO_BISIZE(bio), -ENOMEM); + return; + } + + io->bio = bio; + io->dsk = dsk; + + atomic_inc(&dsk->exp_obj->pt_ios); + + CAS_BIO_SET_DEV(cloned_bio, casdsk_disk_get_blkdev(dsk)); + cloned_bio->bi_private = io; + cloned_bio->bi_end_io = REFER_BLOCK_CALLBACK(_casdsk_exp_obj_bio_pt_io); + cas_submit_bio(BIO_OP_FLAGS(cloned_bio), cloned_bio); +} + +static inline void _casdsk_exp_obj_handle_bio(struct casdsk_disk *dsk, + struct request_queue *q, + struct bio *bio) +{ + if (likely(casdsk_disk_is_attached(dsk))) + _casdsk_exp_obj_handle_bio_att(dsk, q, bio); + else if (casdsk_disk_is_pt(dsk)) + 
_casdsk_exp_obj_handle_bio_pt(dsk, q, bio); + else if (casdsk_disk_is_shutdown(dsk)) + BIO_ENDIO(bio, BIO_BISIZE(bio), -EIO); + else + BUG(); +} + +static inline void _casdsk_exp_obj_end_rq(struct casdsk_disk *dsk, unsigned int cpu) +{ + return atomic_dec(&dsk->exp_obj->pending_rqs[cpu]); +} + +static inline unsigned int _casdsk_exp_obj_begin_rq(struct casdsk_disk *dsk) +{ + unsigned int cpu; + + BUG_ON(!dsk); + +retry: + while (unlikely(casdsk_disk_in_transition(dsk))) + io_schedule(); + + cpu = smp_processor_id(); + atomic_inc(&dsk->exp_obj->pending_rqs[cpu]); + + if (unlikely(casdsk_disk_in_transition(dsk))) { + /* + * If we are in transition state, decrement pending rqs counter + * and retry bio processing + */ + _casdsk_exp_obj_end_rq(dsk, cpu); + goto retry; + } + + return cpu; +} + +static MAKE_RQ_RET_TYPE _casdsk_exp_obj_make_rq_fn(struct request_queue *q, + struct bio *bio) +{ + struct casdsk_disk *dsk; + unsigned int cpu; + + BUG_ON(!bio); + BUG_ON(!q); + BUG_ON(!q->queuedata); + dsk = q->queuedata; + + cpu = _casdsk_exp_obj_begin_rq(dsk); + + _casdsk_exp_obj_handle_bio(dsk, q, bio); + + _casdsk_exp_obj_end_rq(dsk, cpu); + + KRETURN(0); +} + +static int _casdsk_get_next_part_no(struct block_device *bd) +{ + int part_no = 0; + struct gendisk *disk = bd->bd_disk; + struct disk_part_iter piter; + struct hd_struct *part; + + mutex_lock(&bd->bd_mutex); + + disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY); + while ((part = disk_part_iter_next(&piter))) { + part_no = part->partno; + break; + } + disk_part_iter_exit(&piter); + + mutex_unlock(&bd->bd_mutex); + + return part_no; +} + +static int _casdsk_del_partitions(struct block_device *bd) +{ + int result = 0; + int part_no; + struct blkpg_partition bpart; + struct blkpg_ioctl_arg barg; + + memset(&bpart, 0, sizeof(struct blkpg_partition)); + memset(&barg, 0, sizeof(struct blkpg_ioctl_arg)); + barg.data = (void __force __user *) &bpart; + barg.op = BLKPG_DEL_PARTITION; + + + while ((part_no = 
_casdsk_get_next_part_no(bd))) { + bpart.pno = part_no; + result = ioctl_by_bdev(bd, BLKPG, (unsigned long) &barg); + if (result == 0) { + printk(CASDSK_KERN_INFO "Partition %d on %s hidden\n", + part_no, bd->bd_disk->disk_name); + } else { + printk(CASDSK_KERN_ERR "Error(%d) hiding the partition %d on %s\n", + result, part_no, bd->bd_disk->disk_name); + break; + } + } + + return result; +} + +#ifdef GENHD_FL_NO_PART_SCAN +static int _casdsk_flags = GENHD_FL_NO_PART_SCAN | GENHD_FL_EXT_DEVT; +#else +static int _casdsk_flags = GENHD_FL_EXT_DEVT; +#endif + +static int _casdsk_exp_obj_hide_parts(struct casdsk_disk *dsk) +{ + struct block_device *bd = casdsk_disk_get_blkdev(dsk); + struct gendisk *gdsk = casdsk_disk_get_gendisk(dsk); + + if (bd != bd->bd_contains) + /* It is partition, no more job required */ + return 0; + + if (disk_max_parts(dsk->bd->bd_disk) > 1) { + if (_casdsk_del_partitions(bd)) { + printk(CASDSK_KERN_ERR "Error deleting a partition on the device %s\n", + gdsk->disk_name); + + /* Try to restore previous partitions by rescanning */ + ioctl_by_bdev(bd, BLKRRPART, (unsigned long) NULL); + return -EINVAL; + } + } + + /* Save original flags and minors */ + dsk->gd_flags = gdsk->flags & _casdsk_flags; + dsk->gd_minors = gdsk->minors; + + /* Setup disk of bottom device as not partitioned device */ + gdsk->flags &= ~_casdsk_flags; + gdsk->minors = 1; + /* Rescan partitions */ + ioctl_by_bdev(bd, BLKRRPART, (unsigned long) NULL); + + return 0; +} + +static int _casdsk_exp_obj_set_dev_t(struct casdsk_disk *dsk, struct gendisk *gd) +{ + int flags; + int minors = disk_max_parts(casdsk_disk_get_gendisk(dsk)); + struct block_device *bdev; + + bdev = casdsk_disk_get_blkdev(dsk); + BUG_ON(!bdev); + + if (bdev->bd_contains != bdev) { + minors = 1; + flags = 0; + } else { + if (_casdsk_exp_obj_hide_parts(dsk)) + return -EINVAL; + flags = dsk->gd_flags; + } + + gd->first_minor = casdsk_disk_allocate_minors(minors); + if (gd->first_minor < 0) { +
CASDSK_DEBUG_DISK_ERROR(dsk, "Cannot allocate %d minors", minors); + return -EINVAL; + } + gd->minors = minors; + + gd->major = casdsk_module->disk_major; + gd->flags |= flags; + + return 0; +} + +static void _casdsk_exp_obj_clear_dev_t(struct casdsk_disk *dsk) +{ + struct block_device *bdev = casdsk_disk_get_blkdev(dsk); + struct gendisk *gdsk = casdsk_disk_get_gendisk(dsk); + + if (bdev->bd_contains == bdev) { + /* Restore previous configuration of bottom disk */ + gdsk->minors = dsk->gd_minors; + gdsk->flags |= dsk->gd_flags; + ioctl_by_bdev(bdev, BLKRRPART, (unsigned long) NULL); + } +} + +static const struct block_device_operations _casdsk_exp_obj_ops = { + .owner = THIS_MODULE, +}; + +static int casdsk_exp_obj_alloc(struct casdsk_disk *dsk) +{ + struct casdsk_exp_obj *exp_obj; + int result; + + BUG_ON(!dsk); + BUG_ON(dsk->exp_obj); + + CASDSK_DEBUG_DISK_TRACE(dsk); + + exp_obj = kmem_cache_zalloc(casdsk_module->exp_obj_cache, GFP_KERNEL); + if (!exp_obj) { + CASDSK_DEBUG_ERROR("Cannot allocate memory"); + result = -ENOMEM; + goto error_exp_obj_alloc; + } + + exp_obj->pending_rqs = kmem_cache_zalloc(casdsk_module->pending_rqs_cache, + GFP_KERNEL); + if (!exp_obj->pending_rqs) { + result = -ENOMEM; + goto error_pending_rqs_alloc; + } + + dsk->exp_obj = exp_obj; + + return 0; + +error_pending_rqs_alloc: + kmem_cache_free(casdsk_module->exp_obj_cache, exp_obj); +error_exp_obj_alloc: + return result; +} + +void casdsk_exp_obj_free(struct casdsk_disk *dsk) +{ + struct casdsk_exp_obj *exp_obj; + + CASDSK_DEBUG_DISK_TRACE(dsk); + + exp_obj = dsk->exp_obj; + + if (!exp_obj) + return; + + kobject_put(&exp_obj->kobj); + dsk->exp_obj = NULL; +} + +static void __casdsk_exp_obj_release(struct casdsk_exp_obj *exp_obj) +{ + kfree(exp_obj->dev_name); + kmem_cache_free(casdsk_module->pending_rqs_cache, exp_obj->pending_rqs); + kmem_cache_free(casdsk_module->exp_obj_cache, exp_obj); +} + +static void _casdsk_exp_obj_release(struct kobject *kobj) +{ + struct casdsk_exp_obj 
*exp_obj; + struct module *owner; + + BUG_ON(!kobj); + + exp_obj = casdsk_kobj_to_exp_obj(kobj); + BUG_ON(!exp_obj); + + CASDSK_DEBUG_TRACE(); + + owner = exp_obj->owner; + + __casdsk_exp_obj_release(exp_obj); + + if (owner) + module_put(owner); +} + +static struct kobj_type casdsk_exp_obj_ktype = { + .release = _casdsk_exp_obj_release +}; + +static int _casdsk_exp_obj_init_kobject(struct casdsk_disk *dsk) +{ + int result = 0; + struct casdsk_exp_obj *exp_obj = dsk->exp_obj; + + kobject_init(&exp_obj->kobj, &casdsk_exp_obj_ktype); + result = kobject_add(&exp_obj->kobj, &dsk->kobj, + "%s", exp_obj->dev_name); + if (result) + CASDSK_DEBUG_DISK_ERROR(dsk, "Cannot register kobject"); + + return result; +} + +int casdsk_exp_obj_create(struct casdsk_disk *dsk, const char *dev_name, + struct module *owner, struct casdsk_exp_obj_ops *ops) +{ + struct casdsk_exp_obj *exp_obj; + struct request_queue *queue; + struct gendisk *gd; + int result = 0; + + BUG_ON(!owner); + BUG_ON(!dsk); + BUG_ON(!ops); + BUG_ON(dsk->exp_obj); + + CASDSK_DEBUG_DISK_TRACE(dsk); + + if (strlen(dev_name) >= DISK_NAME_LEN) + return -EINVAL; + + result = casdsk_exp_obj_alloc(dsk); + if (result) + goto error_alloc; + + exp_obj = dsk->exp_obj; + + exp_obj->dev_name = kstrdup(dev_name, GFP_KERNEL); + if (!exp_obj->dev_name) { + __casdsk_exp_obj_release(exp_obj); + result = -ENOMEM; + goto error_strdup; + } + + result = _casdsk_exp_obj_init_kobject(dsk); + if (result) { + __casdsk_exp_obj_release(exp_obj); + goto error_kobject; + } + + if (!try_module_get(owner)) { + CASDSK_DEBUG_DISK_ERROR(dsk, "Cannot get reference to module"); + result = -ENAVAIL; + goto error_module; + } + exp_obj->owner = owner; + exp_obj->ops = ops; + + gd = alloc_disk(1); + if (!gd) { + result = -ENOMEM; + goto error_alloc_disk; + } + exp_obj->gd = gd; + + result = _casdsk_exp_obj_set_dev_t(dsk, gd); + if (result) + goto error_dev_t; + + spin_lock_init(&exp_obj->rq_lock); + + queue = blk_init_queue(_casdsk_exp_obj_request_fn, 
&exp_obj->rq_lock); + if (!queue) { + result = -ENOMEM; + goto error_init_queue; + } + BUG_ON(queue->queuedata); + queue->queuedata = dsk; + exp_obj->queue = queue; + + gd->fops = &_casdsk_exp_obj_ops; + gd->queue = queue; + gd->private_data = dsk; + strlcpy(gd->disk_name, exp_obj->dev_name, sizeof(gd->disk_name)); + + if (exp_obj->ops->prepare_queue) { + result = exp_obj->ops->prepare_queue(dsk, queue, dsk->private); + if (result) + goto error_prepare_queue; + } + + blk_queue_prep_rq(queue, _casdsk_exp_obj_prep_rq_fn); + + dsk->exp_obj->mk_rq_fn = queue->make_request_fn; + blk_queue_make_request(queue, _casdsk_exp_obj_make_rq_fn); + + if (exp_obj->ops->set_geometry) { + result = exp_obj->ops->set_geometry(dsk, dsk->private); + if (result) + goto error_set_geometry; + } + + return 0; + +error_set_geometry: + if (exp_obj->ops->cleanup_queue) + exp_obj->ops->cleanup_queue(dsk, queue, dsk->private); +error_prepare_queue: + blk_cleanup_queue(queue); +error_init_queue: + _casdsk_exp_obj_clear_dev_t(dsk); +error_dev_t: + put_disk(gd); +error_alloc_disk: + module_put(owner); + dsk->exp_obj->owner = NULL; +error_module: + casdsk_exp_obj_free(dsk); +error_kobject: +error_strdup: + dsk->exp_obj = NULL; +error_alloc: + return result; +} +EXPORT_SYMBOL(casdsk_exp_obj_create); + +struct request_queue *casdsk_exp_obj_get_queue(struct casdsk_disk *dsk) +{ + BUG_ON(!dsk); + BUG_ON(!dsk->exp_obj); + return dsk->exp_obj->queue; +} +EXPORT_SYMBOL(casdsk_exp_obj_get_queue); + +struct gendisk *casdsk_exp_obj_get_gendisk(struct casdsk_disk *dsk) +{ + BUG_ON(!dsk); + BUG_ON(!dsk->exp_obj); + return dsk->exp_obj->gd; +} +EXPORT_SYMBOL(casdsk_exp_obj_get_gendisk); + +static bool _casdsk_exp_obj_exists(const char *path) +{ + struct file *exported; + + exported = filp_open(path, O_RDONLY, 0); + + if (!exported || IS_ERR(exported)) { + /*failed to open file - it is safe to assume, + * it does not exist + */ + return false; + } + + filp_close(exported, NULL); + return true; +} + +int 
casdsk_exp_obj_activate(struct casdsk_disk *dsk) +{ + char *path; + int result; + + BUG_ON(!dsk); + BUG_ON(!dsk->exp_obj); + BUG_ON(!dsk->exp_obj->gd); + BUG_ON(dsk->exp_obj->activated); + + CASDSK_DEBUG_DISK_TRACE(dsk); + + path = kmalloc(PATH_MAX, GFP_KERNEL); + if (!path) + return -ENOMEM; + + snprintf(path, PATH_MAX, "/dev/%s", dsk->exp_obj->dev_name); + if (_casdsk_exp_obj_exists(path)) { + printk(CASDSK_KERN_ERR "Could not activate exported object, " + "because file %s exists.\n", path); + kfree(path); + return -EEXIST; + } + kfree(path); + + dsk->exp_obj->activated = true; + atomic_set(&dsk->mode, CASDSK_MODE_ATTACHED); + add_disk(dsk->exp_obj->gd); + + result = bd_claim_by_disk(dsk->bd, dsk, dsk->exp_obj->gd); + if (result) + goto error_bd_claim; + + result = sysfs_create_link(&dsk->exp_obj->kobj, + &disk_to_dev(dsk->exp_obj->gd)->kobj, + "blockdev"); + if (result) + goto error_sysfs_link; + + CASDSK_DEBUG_DISK(dsk, "Activated exp object %s", dsk->exp_obj->dev_name); + + return 0; + +error_sysfs_link: + bd_release_from_disk(dsk->bd, dsk->exp_obj->gd); +error_bd_claim: + del_gendisk(dsk->exp_obj->gd); + dsk->exp_obj->activated = false; + return result; +} +EXPORT_SYMBOL(casdsk_exp_obj_activate); + +bool casdsk_exp_obj_activated(struct casdsk_disk *dsk) +{ + BUG_ON(!dsk); + return dsk->exp_obj->activated; +} +EXPORT_SYMBOL(casdsk_exp_obj_activated); + +int casdsk_exp_obj_lock(struct casdsk_disk *dsk) +{ + struct casdsk_exp_obj *exp_obj; + + BUG_ON(!dsk); + BUG_ON(!dsk->exp_obj); + + CASDSK_DEBUG_DISK_TRACE(dsk); + + exp_obj = dsk->exp_obj; + + exp_obj->locked_bd = bdget_disk(exp_obj->gd, 0); + if (!exp_obj->locked_bd) + return -ENAVAIL; + + mutex_lock(&exp_obj->locked_bd->bd_mutex); + + if (exp_obj->locked_bd->bd_openers) { + printk(CASDSK_KERN_DEBUG "Device %s in use (openers=%d). 
Refuse to stop\n", + exp_obj->locked_bd->bd_disk->disk_name, + exp_obj->locked_bd->bd_openers); + + casdsk_exp_obj_unlock(dsk); + return -EBUSY; + } + + return 0; +} +EXPORT_SYMBOL(casdsk_exp_obj_lock); + +int casdsk_exp_obj_unlock(struct casdsk_disk *dsk) +{ + BUG_ON(!dsk); + BUG_ON(!dsk->exp_obj); + BUG_ON(!dsk->exp_obj->locked_bd); + + CASDSK_DEBUG_DISK_TRACE(dsk); + + mutex_unlock(&dsk->exp_obj->locked_bd->bd_mutex); + bdput(dsk->exp_obj->locked_bd); + dsk->exp_obj->locked_bd = NULL; + + return 0; +} +EXPORT_SYMBOL(casdsk_exp_obj_unlock); + +int casdsk_exp_obj_destroy(struct casdsk_disk *dsk) +{ + struct casdsk_exp_obj *exp_obj; + + BUG_ON(!dsk); + BUG_ON(!dsk->exp_obj); + BUG_ON(!dsk->exp_obj->locked_bd); + + CASDSK_DEBUG_DISK_TRACE(dsk); + + exp_obj = dsk->exp_obj; + + if (casdsk_exp_obj_activated(dsk)) { + sysfs_remove_link(&exp_obj->kobj, "blockdev"); + bd_release_from_disk(dsk->bd, exp_obj->gd); + _casdsk_exp_obj_clear_dev_t(dsk); + del_gendisk(exp_obj->gd); + } + + if (exp_obj->queue) + blk_cleanup_queue(exp_obj->queue); + + atomic_set(&dsk->mode, CASDSK_MODE_UNKNOWN); + put_disk(exp_obj->gd); + + return 0; + +} +EXPORT_SYMBOL(casdsk_exp_obj_destroy); + +int casdsk_exp_obj_dettach(struct casdsk_disk *dsk) +{ + module_put(dsk->exp_obj->owner); + + dsk->exp_obj->owner = NULL; + dsk->exp_obj->ops = NULL; + + return 0; +} + +int casdsk_exp_obj_attach(struct casdsk_disk *dsk, struct module *owner, + struct casdsk_exp_obj_ops *ops) +{ + if (!try_module_get(owner)) { + CASDSK_DEBUG_DISK_ERROR(dsk, "Cannot get reference to module"); + return -ENAVAIL; + } + dsk->exp_obj->owner = owner; + dsk->exp_obj->ops = ops; + + return 0; +} + +static void _casdsk_exp_obj_wait_for_pending_rqs(struct casdsk_disk *dsk) +{ + int i, ncpus; + struct casdsk_exp_obj *exp_obj = dsk->exp_obj; + + ncpus = num_online_cpus(); + for (i = 0; i < ncpus; i++) + while (atomic_read(&exp_obj->pending_rqs[i])) + schedule(); +} + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 3, 0) +static void 
_casdsk_exp_obj_drain_elevator(struct request_queue *q) +{ + if (q->elevator && q->elevator->elevator_type) + while (q->elevator->elevator_type->ops. + elevator_dispatch_fn(q, 1)) + ; +} +#elif LINUX_VERSION_CODE <= KERNEL_VERSION(4, 10, 0) +static void _casdsk_exp_obj_drain_elevator(struct request_queue *q) +{ + if (q->elevator && q->elevator->type) + while (q->elevator->type->ops.elevator_dispatch_fn(q, 1)) + ; +} +#else +static void _casdsk_exp_obj_drain_elevator(struct request_queue *q) +{ + if (q->elevator && q->elevator->type) + while (q->elevator->type->ops.sq.elevator_dispatch_fn(q, 1)) + ; +} +#endif + +static void _casdsk_exp_obj_flush_queue(struct casdsk_disk *dsk) +{ + struct casdsk_exp_obj *exp_obj = dsk->exp_obj; + struct request_queue *q = exp_obj->queue; + + spin_lock_irq(q->queue_lock); + _casdsk_exp_obj_drain_elevator(q); + spin_unlock_irq(q->queue_lock); + + blk_run_queue(q); + blk_sync_queue(q); +} + +void casdsk_exp_obj_prepare_pt(struct casdsk_disk *dsk) +{ + _casdsk_exp_obj_wait_for_pending_rqs(dsk); + _casdsk_exp_obj_flush_queue(dsk); +} + +void casdsk_exp_obj_prepare_attached(struct casdsk_disk *dsk) +{ + _casdsk_exp_obj_wait_for_pending_rqs(dsk); + + while (atomic_read(&dsk->exp_obj->pt_ios)) + schedule_timeout(msecs_to_jiffies(200)); +} + +void casdsk_exp_obj_prepare_shutdown(struct casdsk_disk *dsk) +{ + _casdsk_exp_obj_wait_for_pending_rqs(dsk); + + while (atomic_read(&dsk->exp_obj->pt_ios)) + schedule_timeout(msecs_to_jiffies(200)); +} diff --git a/modules/cas_disk/exp_obj.h b/modules/cas_disk/exp_obj.h new file mode 100644 index 000000000..e598d1973 --- /dev/null +++ b/modules/cas_disk/exp_obj.h @@ -0,0 +1,59 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ +#ifndef __CASDISK_EXP_OBJ_H__ +#define __CASDISK_EXP_OBJ_H__ + +#include +#include + +struct casdsk_disk; + +struct casdsk_exp_obj_pt_io_ctx { + struct casdsk_disk *dsk; + struct bio *bio; +}; + +struct casdsk_exp_obj { + + 
struct gendisk *gd; + struct request_queue *queue; + spinlock_t rq_lock; + + struct block_device *locked_bd; + + struct module *owner; + + bool activated; + + struct casdsk_exp_obj_ops *ops; + + make_request_fn *mk_rq_fn; + + const char *dev_name; + struct kobject kobj; + + atomic_t pt_ios; + atomic_t *pending_rqs; +}; + +int __init casdsk_init_exp_objs(void); +void casdsk_deinit_exp_objs(void); + +void casdsk_exp_obj_free(struct casdsk_disk *dsk); + +int casdsk_exp_obj_dettach(struct casdsk_disk *dsk); +int casdsk_exp_obj_attach(struct casdsk_disk *dsk, struct module *owner, + struct casdsk_exp_obj_ops *ops); +void casdsk_exp_obj_prepare_pt(struct casdsk_disk *dsk); +void casdsk_exp_obj_prepare_attached(struct casdsk_disk *dsk); + +void casdsk_exp_obj_prepare_shutdown(struct casdsk_disk *dsk); + +static inline struct casdsk_exp_obj *casdsk_kobj_to_exp_obj(struct kobject *kobj) +{ + return container_of(kobj, struct casdsk_exp_obj, kobj); +} + +#endif diff --git a/modules/cas_disk/exp_obj.o.ur-safe b/modules/cas_disk/exp_obj.o.ur-safe new file mode 100644 index 000000000..288700eef --- /dev/null +++ b/modules/cas_disk/exp_obj.o.ur-safe @@ -0,0 +1,2 @@ +/home/robert/work/cas/ICAS_Linux/modules/cas_disk/exp_obj.o-.text-f20 +/home/robert/work/cas/ICAS_Linux/modules/cas_disk/exp_obj.o-.text-f27 diff --git a/modules/cas_disk/main.c b/modules/cas_disk/main.c new file mode 100644 index 000000000..21a5218a9 --- /dev/null +++ b/modules/cas_disk/main.c @@ -0,0 +1,165 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#include +#include +#include +#include "cas_disk_defs.h" +#include "cas_disk.h" +#include "disk.h" +#include "exp_obj.h" +#include "sysfs.h" + +/* Layer information. 
*/ +MODULE_AUTHOR("Intel(R) Corporation"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(CAS_VERSION); + +static int iface_version = CASDSK_IFACE_VERSION; +module_param(iface_version, int, (S_IRUSR | S_IRGRP)); + +static int upgrade_in_progress = 0; +module_param(upgrade_in_progress, int, (S_IRUSR | S_IRGRP)); + +struct casdsk_module *casdsk_module; + +uint32_t casdsk_get_version(void) +{ + return CASDSK_IFACE_VERSION; +} +EXPORT_SYMBOL(casdsk_get_version); + +static void _casdsk_module_free_config(struct casdsk_module *mod) +{ + int i; + + if (mod->config.blobs) { + for (i = 0; i < mod->config.n_blobs; i++) + vfree(mod->config.blobs[i].buffer); + kfree(mod->config.blobs); + + mod->config.blobs = NULL; + mod->config.n_blobs = 0; + } +} + +void casdsk_store_config(size_t n_blobs, struct casdsk_props_conf *blobs) +{ + upgrade_in_progress = 1; + _casdsk_module_free_config(casdsk_module); + casdsk_module->config.blobs = blobs; + casdsk_module->config.n_blobs = n_blobs; +} +EXPORT_SYMBOL(casdsk_store_config); + +size_t casdsk_get_stored_config(struct casdsk_props_conf **blobs) +{ + BUG_ON(!blobs); + + *blobs = casdsk_module->config.blobs; + return casdsk_module->config.n_blobs; +} +EXPORT_SYMBOL(casdsk_get_stored_config); + +void casdsk_free_stored_config(void) +{ + CASDSK_DEBUG_TRACE(); + _casdsk_module_free_config(casdsk_module); + upgrade_in_progress = 0; +} +EXPORT_SYMBOL(casdsk_free_stored_config); + +static void _casdsk_module_release(struct kobject *kobj) +{ + struct casdsk_module *mod; + + CASDSK_DEBUG_TRACE(); + + BUG_ON(!kobj); + + mod = container_of(kobj, struct casdsk_module, kobj); + BUG_ON(!mod); + + _casdsk_module_free_config(mod); + + kfree(mod); +} + +static struct kobj_type _casdsk_module_ktype = { + .release = _casdsk_module_release, +}; + +static int __init casdsk_init_kobjects(void) +{ + int result = 0; + + CASDSK_DEBUG_TRACE(); + + kobject_init(&casdsk_module->kobj, &_casdsk_module_ktype); + result = kobject_add(&casdsk_module->kobj, 
kernel_kobj, "cas_disk"); + if (result) + CASDSK_DEBUG_ERROR("Cannot register kobject"); + + return result; +} + +static int __init casdsk_init_module(void) +{ + int result = 0; + + casdsk_module = kzalloc(sizeof(*casdsk_module), GFP_KERNEL); + if (!casdsk_module) { + result = -ENOMEM; + goto error_kmalloc; + } + + mutex_init(&casdsk_module->lock); + + mutex_lock(&casdsk_module->lock); + + result = casdsk_init_exp_objs(); + if (result) + goto error_init_exp_objs; + + result = casdsk_init_disks(); + if (result) + goto error_init_disks; + + result = casdsk_init_kobjects(); + if (result) + goto error_kobjects; + + mutex_unlock(&casdsk_module->lock); + + printk(CASDSK_KERN_INFO "%s Version %s (%s)::Module loaded successfully\n", + CASDSK_LOGO, CAS_VERSION, CAS_KERNEL); + + return result; + +error_kobjects: + casdsk_deinit_disks(); +error_init_disks: + casdsk_deinit_exp_objs(); +error_init_exp_objs: + mutex_unlock(&casdsk_module->lock); + kfree(casdsk_module); +error_kmalloc: + return result; +} +module_init(casdsk_init_module); + +static void __exit casdsk_deinit_kobjects(void) +{ + kobject_put(&casdsk_module->kobj); +} + +static void __exit casdsk_exit_module(void) +{ + casdsk_disk_shutdown_all(); + casdsk_deinit_disks(); + casdsk_deinit_exp_objs(); + casdsk_deinit_kobjects(); +} +module_exit(casdsk_exit_module); diff --git a/modules/cas_disk/sysfs.c b/modules/cas_disk/sysfs.c new file mode 100644 index 000000000..a501e364a --- /dev/null +++ b/modules/cas_disk/sysfs.c @@ -0,0 +1,35 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ +#include "cas_disk_defs.h" +#include "sysfs.h" + +static ssize_t _casdsk_sysfs_show(struct kobject *kobj, struct attribute *attr, + char *page) +{ + struct casdsk_attribute *casdsk_attr = + container_of(attr, struct casdsk_attribute, attr); + + if (!casdsk_attr->show) + return -EIO; + + return casdsk_attr->show(kobj, page); +} + +static ssize_t _casdsk_sysfs_store(struct kobject *kobj, 
struct attribute *attr, + const char *buf, size_t len) +{ + struct casdsk_attribute *casdsk_attr = + container_of(attr, struct casdsk_attribute, attr); + + if (!casdsk_attr->store) + return -EIO; + + return casdsk_attr->store(kobj, buf, len); +} + +const struct sysfs_ops casdsk_sysfs_ops = { + .show = _casdsk_sysfs_show, + .store = _casdsk_sysfs_store +}; diff --git a/modules/cas_disk/sysfs.h b/modules/cas_disk/sysfs.h new file mode 100644 index 000000000..8499926fa --- /dev/null +++ b/modules/cas_disk/sysfs.h @@ -0,0 +1,21 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ +#ifndef __CASDISK_SYSFS_H__ +#define __CASDISK_SYSFS_H__ + +#include +#include + +struct casdsk_disk; + +struct casdsk_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, char *page); + ssize_t (*store)(struct kobject *kobj, const char *buf, size_t len); +}; + +extern const struct sysfs_ops casdsk_sysfs_ops; + +#endif diff --git a/modules/config.mk b/modules/config.mk new file mode 100644 index 000000000..14dcd7d79 --- /dev/null +++ b/modules/config.mk @@ -0,0 +1,94 @@ +# +# Copyright(c) 2012-2019 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# + +VERSION_FILE := $(M)/CAS_VERSION + +$(VERSION_FILE): + ./CAS_VERSION_GEN + +check_cflag=$(shell echo "" | \ + gcc -c -xc ${1} -o /dev/null - 2>/dev/null; \ + if [ $$? 
-eq 0 ]; then echo 1; else echo 0; fi; ) + +-include $(VERSION_FILE) +EXTRA_CFLAGS += -DCAS_VERSION_MAIN=$(CAS_VERSION_MAIN) +EXTRA_CFLAGS += -DCAS_VERSION_MAJOR=$(CAS_VERSION_MAJOR) +EXTRA_CFLAGS += -DCAS_VERSION_MINOR=$(CAS_VERSION_MINOR) +EXTRA_CFLAGS += -DCAS_BUILD_NO=\"$(CAS_BUILD_NO)\" +ifeq ($(strip $(CAS_BUILD_FLAG)),) +else +EXTRA_CFLAGS += -DCAS_BUILD_FLAG=\"$(CAS_BUILD_FLAG)\" +endif +EXTRA_CFLAGS += -O2 -D_FORTIFY_SOURCE=2 -Wformat -Wformat-security + +EXTRA_CFLAGS += -I$(M) +EXTRA_CFLAGS += -I$(M)/cas_cache +EXTRA_CFLAGS += -I$(M)/include +EXTRA_CFLAGS += -DCAS_KERNEL=\"$(KERNELRELEASE)\" + +check_header=$(shell echo "\#include <${1}>" | \ + gcc -c -xc -o /dev/null - 2>/dev/null; \ + if [ $$? -eq 0 ]; then echo 1; else echo 0; fi; ) + +INCDIR = $(PWD)/include + +NVME_FULL = 0 + +SLES ?= $(shell cat /etc/SuSE-release 2>/dev/null) +ifneq ($(SLES),) +EXTRA_CFLAGS += -DCAS_UAPI_LINUX_NVME_IOCTL +EXTRA_CFLAGS += -DCAS_NVME_PARTIAL +EXTRA_CFLAGS += -DCAS_SLES +SLES_VERSION := $(shell cat /etc/os-release |\ + sed -n 's/VERSION="\([0-9]\+\)-\(.\+\)"/\1\2/p') +EXTRA_CFLAGS += -DCAS_SLES$(SLES_VERSION) +INCDIR = "" +endif + +ifeq ($(call check_header,$(INCDIR)/uapi/nvme.h), 1) +EXTRA_CFLAGS += -DCAS_UAPI_NVME_IOCTL +EXTRA_CFLAGS += -DCAS_UAPI_NVME +EXTRA_CFLAGS += -DCAS_NVME_PARTIAL +endif + +ifeq ($(call check_header,$(INCDIR)/uapi/linux/nvme.h), 1) +EXTRA_CFLAGS += -DCAS_UAPI_LINUX_NVME +EXTRA_CFLAGS += -DCAS_NVME_PARTIAL +endif + +ifeq ($(call check_header,$(INCDIR)/uapi/linux/nvme_ioctl.h), 1) +EXTRA_CFLAGS += -DCAS_UAPI_LINUX_NVME_IOCTL +EXTRA_CFLAGS += -DCAS_NVME_PARTIAL +ifeq ($(shell cat /etc/redhat-release 2>/dev/null | grep "\(Red Hat\|CentOS\) [a-zA-Z ]* 7\.[45]" | wc -l), 1) +NVME_FULL = 1 +endif +endif + +KERNEL_VERSION = $(shell echo $(KERNELRELEASE) | cut -d'.' -f1) +KERNEL_MAJOR = $(shell echo $(KERNELRELEASE) | cut -d'.' 
-f2) + +ifeq ($(shell expr $(KERNEL_VERSION) \>= 4 \& $(KERNEL_MAJOR) \> 11),1) +NVME_FULL = 0 +endif + +ifeq ($(NVME_FULL),1) +EXTRA_CFLAGS += -DCAS_NVME_FULL +endif + +EXTRA_CFLAGS += -Werror + +EXTRA_LDFLAGS += -z noexecstack -z relro -z now + +# workaround for missing objtool in kernel devel package +ifeq ($(shell expr $(KERNEL_VERSION) == 4 \& $(KERNEL_MAJOR) == 14),1) +ifeq ($(CONFIG_STACK_VALIDATION), y) +OBJTOOL=$(shell [ -f tools/objtool/objtool ] && echo "y") +ifneq ($(OBJTOOL), y) +CONFIG_STACK_VALIDATION= +endif +endif +endif + +-include $(M)/extra.mk diff --git a/modules/extra.mk b/modules/extra.mk new file mode 100644 index 000000000..7437ed3fa --- /dev/null +++ b/modules/extra.mk @@ -0,0 +1,23 @@ +# +# Copyright(c) 2012-2019 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# +ifneq ($(KERNELRELEASE),) + +ifeq ($(CAS_EXT_EXP),1) +EXTRA_CFLAGS += -DWI_AVAILABLE +endif + +else #KERNELRELEASE + +.PHONY: sync distsync + +sync: + @cd $(OCFDIR) && $(MAKE) inc O=$(PWD) + @cd $(OCFDIR) && $(MAKE) src O=$(PWD)/cas_cache + +distsync: + @cd $(OCFDIR) && $(MAKE) distclean O=$(PWD) + @cd $(OCFDIR) && $(MAKE) distclean O=$(PWD)/cas_cache + +endif diff --git a/modules/include/cas_ioctl_codes.h b/modules/include/cas_ioctl_codes.h new file mode 100644 index 000000000..6f9ca3e58 --- /dev/null +++ b/modules/include/cas_ioctl_codes.h @@ -0,0 +1,572 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __CAS_IOCTL_CODES_H__ +#define __CAS_IOCTL_CODES_H__ +/** + * @file + * @brief Main file for ioctl interface between kernel module and userspace component. 
+ * + * This file contains IOCTL commands, structured passed as parameters to said commands + * and documentation of CAS specific extended error codes (that are a bit more verbose than + * standard errno) + */ + +#include "ocf/ocf.h" +#include + +/** + * Max path, string size + */ +#define MAX_STR_LEN PATH_MAX + +/** + * Max size of elevator name (including null terminator) + */ +#define MAX_ELEVATOR_NAME 16 + +/** \cond SKIP_IN_DOC */ +#define CACHE_LIST_ID_LIMIT 20 + +#define INVALID_FLUSH_PARAM -1 +/** \endcond */ + +#define CACHE_INIT_NEW 0 /**< initialize new metadata from fresh start */ +#define CACHE_INIT_LOAD 1 /**< load existing metadata */ + +struct kcas_start_cache { + /** + * id of newely inserted cache (in range 1-OCF_CACHE_ID_MAX). + */ + ocf_cache_id_t cache_id; + + /** + * cache initialization mode + * valid choices are: + * * CACHE_INIT_NEW + * * CACHE_INIT_LOAD + */ + uint8_t init_cache; + + char cache_path_name[MAX_STR_LEN]; /**< path to an ssd*/ + + /** + * caching mode for new cache instance + * valid choices are: + * * WRITE_THROUGH + * * WRITE_BACK + * * WRITE_AROUND + * * PASS_THROUGH + */ + ocf_cache_mode_t caching_mode; + + /** + * eviction policy to be used for newely configured cache instance. + */ + ocf_eviction_t eviction_policy; + + uint8_t flush_data; /**< should data be flushed? */ + + /** + * cache line size + */ + ocf_cache_line_size_t line_size; + + uint8_t force; /**< should force option be used? */ + + uint64_t min_free_ram; /**< Minimum free RAM memory for cache metadata */ + + uint8_t metadata_mode_optimal; /**< Current metadata mode is optimal */ + + char cache_elevator[MAX_ELEVATOR_NAME]; + + int ext_err_code; +}; + +struct kcas_stop_cache { + ocf_cache_id_t cache_id; /**< id of cache to be stopped */ + + uint8_t flush_data; /**< should data be flushed? 
*/ + + int ext_err_code; +}; + +struct kcas_set_cache_state { + ocf_cache_id_t cache_id; /**< id of cache for which state should be set */ + + /** + * caching mode for new cache instance + * valid choices are: + * * WRITE_THROUGH + * * WRITE_BACK + * * WRITE_AROUND + * * PASS_THROUGH + */ + ocf_cache_mode_t caching_mode; + + + uint8_t flush_data; /**< should data be flushed? */ + + int ext_err_code; +}; + +struct kcas_insert_core { + ocf_cache_id_t cache_id; /**< id of an running cache */ + ocf_core_id_t core_id; /**< id of newely inserted core object */ + char core_path_name[MAX_STR_LEN]; /**< path to a core object */ + bool try_add; /**< add core to pool if cache isn't present */ + bool update_path; /**< provide alternative path for core device */ + + int ext_err_code; +}; + +struct kcas_remove_core { + ocf_cache_id_t cache_id; /**< id of an running cache */ + ocf_core_id_t core_id; /**< id core object to be removed */ + bool force_no_flush; /**< remove core without flushing */ + bool detach; /**< detach core without removing it from cache metadata */ + + int ext_err_code; +}; + +struct kcas_reset_stats { + ocf_cache_id_t cache_id; /**< id of an running cache */ + ocf_core_id_t core_id; /**< id core object to be removed */ + + int ext_err_code; +}; + +struct kcas_flush_cache { + ocf_cache_id_t cache_id; /**< id of an running cache */ + + int ext_err_code; +}; + +struct kcas_interrupt_flushing { + ocf_cache_id_t cache_id; /**< id of an running cache */ + + int ext_err_code; +}; + +struct kcas_flush_core { + ocf_cache_id_t cache_id; /**< id of an running cache */ + ocf_core_id_t core_id; /**< id core object to be removed */ + + int ext_err_code; +}; + +struct kcas_cache_info { + /** id of a cache */ + ocf_cache_id_t cache_id; + + /** path to caching device */ + char cache_path_name[MAX_STR_LEN]; + + /** + * IDs of cores associated with this cache. 
+ */ + ocf_core_id_t core_id[OCF_CORE_MAX]; + + struct ocf_cache_info info; + + uint8_t metadata_mode; /**< metadata mode (normal/atomic) */ + + int ext_err_code; +}; + +struct kcas_core_info { + /** Path name to underlying device */ + char core_path_name[MAX_STR_LEN]; + + /** Cache id */ + ocf_cache_id_t cache_id; + + /** Core id */ + ocf_core_id_t core_id; + + /** CAS statistics of core */ + struct ocf_stats_core stats; + + ocf_core_state_t state; + + int ext_err_code; +}; + +struct kcas_core_pool_path { + /** Handler to tab with cores path*/ + char *core_path_tab; + + /** Number of cores in core pool */ + int core_pool_count; + + int ext_err_code; +}; + +struct kcas_cache_count { + /** Number of running caches */ + int cache_count; + + int ext_err_code; +}; + +struct kcas_core_pool_count { + /** Number of cores in core pool */ + int core_pool_count; + + int ext_err_code; +}; + +/** + * IO class info and statistics + */ +struct kcas_io_class { + /** Cache ID */ + ocf_cache_id_t cache_id; + + /** Core ID */ + ocf_core_id_t core_id; + + /** IO class id for which info will be retrieved */ + uint32_t class_id; + + /** IO class info */ + struct ocf_io_class_info info; + + /** Flag indicating if partition counters should be fetched. */ + uint8_t get_stats; + + /** IO class statistics */ + struct ocf_stats_io_class stats; + + int ext_err_code; +}; + +/** + * IO class settings + */ +struct kcas_io_classes { + /** Cache ID */ + ocf_cache_id_t cache_id; + + int ext_err_code; + + /** IO class info */ + struct ocf_io_class_info info[]; +}; + +#define KCAS_IO_CLASSES_SIZE (sizeof(struct kcas_io_classes) \ + + OCF_IO_CLASS_MAX * sizeof(struct ocf_io_class_info)) + +/** + * structure in which result of KCAS_IOCTL_LIST_CACHE is supplied from kernel module. 
+ */ +struct kcas_cache_list { + /** starting position in dev list for getting cache id */ + uint32_t id_position; + /** requested number of ids and returned in response cmd */ + uint32_t in_out_num; + /** array with cache list and its properties */ + ocf_cache_id_t cache_id_tab[CACHE_LIST_ID_LIMIT]; + + int ext_err_code; +}; + +/** + * CAS capabilities. + */ +struct kcas_capabilites { + uint8_t nvme_format : 1; + /**< NVMe format support */ + + int ext_err_code; +}; + +struct kcas_upgrade { + int ext_err_code; +}; + +/** + * Format NVMe namespace. + */ +#define CAS_METADATA_MODE_NORMAL 0 +#define CAS_METADATA_MODE_ATOMIC 1 +#define CAS_METADATA_MODE_INVALID 255 + +struct kcas_nvme_format { + char device_path_name[MAX_STR_LEN]; /**< path to NVMe device*/ + int metadata_mode; /**< selected metadata mode */ + int force; + + int ext_err_code; +}; + +struct kcas_core_pool_remove { + char core_path_name[MAX_STR_LEN]; /**< path to a core object */ + + int ext_err_code; +}; + +struct kcas_cache_check_device { + char path_name[MAX_STR_LEN]; /**< path to a device */ + bool is_cache_device; + bool clean_shutdown; + bool cache_dirty; + bool format_atomic; + + int ext_err_code; +}; + +enum kcas_core_param_id { + core_param_seq_cutoff_threshold, + core_param_seq_cutoff_policy, + core_param_id_max, +}; + +struct kcas_set_core_param { + ocf_cache_id_t cache_id; + ocf_core_id_t core_id; + enum kcas_core_param_id param_id; + uint32_t param_value; + + int ext_err_code; +}; + +struct kcas_get_core_param { + ocf_cache_id_t cache_id; + ocf_core_id_t core_id; + enum kcas_core_param_id param_id; + uint32_t param_value; + + int ext_err_code; +}; + +enum kcas_cache_param_id { + cache_param_cleaning_policy_type, + cache_param_cleaning_alru_wake_up_time, + cache_param_cleaning_alru_stale_buffer_time, + cache_param_cleaning_alru_flush_max_buffers, + cache_param_cleaning_alru_activity_threshold, + cache_param_cleaning_acp_wake_up_time, + cache_param_cleaning_acp_flush_max_buffers, + 
cache_param_id_max, +}; + +struct kcas_set_cache_param { + ocf_cache_id_t cache_id; + enum kcas_cache_param_id param_id; + uint32_t param_value; + + int ext_err_code; +}; + +struct kcas_get_cache_param { + ocf_cache_id_t cache_id; + enum kcas_cache_param_id param_id; + uint32_t param_value; + + int ext_err_code; +}; + +/******************************************************************************* + * CODE * NAME * STATUS * + ******************************************************************************* + * 1 * KCAS_IOCTL_START_CACHE * DEPRECATED * + * 2 * KCAS_IOCTL_STOP_CACHE * OK * + * 3 * KCAS_IOCTL_SET_CACHE_STATE * OK * + * 4 * KCAS_IOCTL_INSERT_CORE * DEPRECATED * + * 5 * KCAS_IOCTL_REMOVE_CORE * DEPRECATED * + * 6 * KCAS_IOCTL_RESET_STATS * OK * + * 7 * KCAS_IOCTL_SET_CLEANING_PARAMETERS * DEPRECATED * + * 8 * KCAS_IOCTL_GET_CLEANING_PARAMETERS * DEPRECATED * + * 9 * KCAS_IOCTL_FLUSH_CACHE * OK * + * 10 * KCAS_IOCTL_INTERRUPT_FLUSHING * OK * + * 11 * KCAS_IOCTL_FLUSH_CORE * OK * + * 12 * KCAS_IOCTL_CACHE_INFO * DEPRECATED * + * 13 * KCAS_IOCTL_CORE_INFO * DEPRECATED * + * 14 * KCAS_IOCTL_PARTITION_STATS * OK * + * 15 * KCAS_IOCTL_PARTITION_SET * OK * + * 16 * KCAS_IOCTL_GET_CACHE_COUNT * OK * + * 17 * KCAS_IOCTL_LIST_CACHE * OK * + * 18 * KCAS_IOCTL_GET_CAPABILITIES * OK * + * 19 * KCAS_IOCTL_UPGRADE * OK * + * 20 * KCAS_IOCTL_NVME_FORMAT * OK * + * 21 * KCAS_IOCTL_START_CACHE * OK * + * 22 * KCAS_IOCTL_INSERT_CORE * OK * + * 23 * KCAS_IOCTL_REMOVE_CORE * OK * + * 24 * KCAS_IOCTL_CACHE_INFO * OK * + * 25 * KCAS_IOCTL_CORE_INFO * OK * + * 26 * KCAS_IOCTL_GET_CORE_POOL_COUNT * OK * + * 27 * KCAS_IOCTL_GET_CORE_POOL_PATHS * OK * + * 28 * KCAS_IOCTL_CORE_POOL_REMOVE * OK * + * 29 * KCAS_IOCTL_CACHE_CHECK_DEVICE * OK * + * 30 * KCAS_IOCTL_SET_CORE_PARAM * OK * + * 31 * KCAS_IOCTL_GET_CORE_PARAM * OK * + * 32 * KCAS_IOCTL_SET_CACHE_PARAM * OK * + * 33 * KCAS_IOCTL_GET_CACHE_PARAM * OK * + 
******************************************************************************* + */ + +/** \cond SKIP_IN_DOC */ +#define KCAS_IOCTL_MAGIC (0xBA) +/** \endcond */ + +/** Stop cache with or without flushing dirty data */ +#define KCAS_IOCTL_STOP_CACHE _IOWR(KCAS_IOCTL_MAGIC, 2, struct kcas_stop_cache) + +/** Set cache mode (write back, write through etc... */ +#define KCAS_IOCTL_SET_CACHE_STATE _IOR(KCAS_IOCTL_MAGIC, 3, struct kcas_set_cache_state) + +/** Reset statistic counters for given cache object */ +#define KCAS_IOCTL_RESET_STATS _IOR(KCAS_IOCTL_MAGIC, 6, struct kcas_reset_stats) + +/** Flush dirty data from an running cache instance that + * is or was running in write-back mode */ +#define KCAS_IOCTL_FLUSH_CACHE _IOWR(KCAS_IOCTL_MAGIC, 9, struct kcas_flush_cache) + +/** Interrupt dirty block flushing operation */ +#define KCAS_IOCTL_INTERRUPT_FLUSHING _IOWR(KCAS_IOCTL_MAGIC, 10, struct kcas_interrupt_flushing) + +/* Flush dirty data from an running core object + * that is or was running in write-back mode */ +#define KCAS_IOCTL_FLUSH_CORE _IOR(KCAS_IOCTL_MAGIC, 11, struct kcas_flush_core) + +/** Retrieving partition status for specified cache id and partition id */ +#define KCAS_IOCTL_PARTITION_STATS _IOWR(KCAS_IOCTL_MAGIC, 14, struct kcas_io_class) + +/** Configure partitions for specified cache id */ +#define KCAS_IOCTL_PARTITION_SET _IOWR(KCAS_IOCTL_MAGIC, 15, struct kcas_io_classes) + +/** Obtain number of valid cache ids within running open cas instance */ +#define KCAS_IOCTL_GET_CACHE_COUNT _IOR(KCAS_IOCTL_MAGIC, 16, struct kcas_cache_count) + +/** List valid cache ids within Open CAS module */ +#define KCAS_IOCTL_LIST_CACHE _IOWR(KCAS_IOCTL_MAGIC, 17, struct kcas_cache_list) + +/** Provides capabilites of installed open cas module */ +#define KCAS_IOCTL_GET_CAPABILITIES _IOWR(KCAS_IOCTL_MAGIC, 18, struct kcas_capabilites) + +/** Start upgrade in flight procedure */ +#define KCAS_IOCTL_UPGRADE _IOR(KCAS_IOCTL_MAGIC, 19, struct kcas_upgrade) + +/** 
Format NVMe namespace to support selected metadata mode */ +#define KCAS_IOCTL_NVME_FORMAT _IOWR(KCAS_IOCTL_MAGIC, 20, struct kcas_nvme_format) + +/** Start new cache instance, load cache or recover cache */ +#define KCAS_IOCTL_START_CACHE _IOWR(KCAS_IOCTL_MAGIC, 21, struct kcas_start_cache) + +/** Add core object to an running cache instance */ +#define KCAS_IOCTL_INSERT_CORE _IOWR(KCAS_IOCTL_MAGIC, 22, struct kcas_insert_core) + +/** Remove core object from an running cache instance */ +#define KCAS_IOCTL_REMOVE_CORE _IOR(KCAS_IOCTL_MAGIC, 23, struct kcas_remove_core) + +/** Retrieve properties of a running cache instance (incl. mode etc.) */ +#define KCAS_IOCTL_CACHE_INFO _IOWR(KCAS_IOCTL_MAGIC, 24, struct kcas_cache_info) + +/** Rretrieve statisting of a given core object */ +#define KCAS_IOCTL_CORE_INFO _IOWR(KCAS_IOCTL_MAGIC, 25, struct kcas_core_info) + +/** Get core pool count */ +#define KCAS_IOCTL_GET_CORE_POOL_COUNT _IOR(KCAS_IOCTL_MAGIC, 26, struct kcas_core_pool_count) + +/** Ret paths from devices which are in core pool */ +#define KCAS_IOCTL_GET_CORE_POOL_PATHS _IOWR(KCAS_IOCTL_MAGIC, 27, struct kcas_core_pool_path) + +/** Remove device from core pool */ +#define KCAS_IOCTL_CORE_POOL_REMOVE _IOWR(KCAS_IOCTL_MAGIC, 28, struct kcas_core_pool_remove) + +/** Check if given device is initialized cache device */ +#define KCAS_IOCTL_CACHE_CHECK_DEVICE _IOWR(KCAS_IOCTL_MAGIC, 29, struct kcas_cache_check_device) + +/** Set various core runtime parameters */ +#define KCAS_IOCTL_SET_CORE_PARAM _IOW(KCAS_IOCTL_MAGIC, 30, struct kcas_set_core_param) + +/** Get various core runtime parameters */ +#define KCAS_IOCTL_GET_CORE_PARAM _IOW(KCAS_IOCTL_MAGIC, 31, struct kcas_get_core_param) + +/** Set various cache runtime parameters */ +#define KCAS_IOCTL_SET_CACHE_PARAM _IOW(KCAS_IOCTL_MAGIC, 32, struct kcas_set_cache_param) + +/** Get various cache runtime parameters */ +#define KCAS_IOCTL_GET_CACHE_PARAM _IOW(KCAS_IOCTL_MAGIC, 33, struct kcas_get_cache_param) + +/** 
+ * Extended kernel CAS error codes + */ +enum kcas_error { + /** Must be root */ + KCAS_ERR_ROOT = 2000000, + + /** System Error */ + KCAS_ERR_SYSTEM, + + /** Range parameters are invalid */ + KCAS_ERR_BAD_RANGE, + + /** Illegal range, out of device space */ + KCAS_ERR_DEV_SPACE, + + /** Invalid ioctl */ + KCAS_ERR_INV_IOCTL, + + /** Device opens or mount are pending to this cache */ + KCAS_ERR_DEV_PENDING, + + /** NVMe Cache device contains dirty data. */ + KCAS_ERR_DIRTY_EXISTS_NVME, + + /** Could not create exported object because file in /dev directory + * exists + */ + KCAS_ERR_FILE_EXISTS, + + /** CAS is under upgrade */ + KCAS_ERR_IN_UPGRADE, + + /** Cache device sector size is greater than core device %s sector size + */ + KCAS_ERR_UNALIGNED, + + /** No caches configuration for upgrade in flight */ + KCAS_ERR_NO_STORED_CONF, + + /** Cannot roll-back previous configuration */ + KCAS_ERR_ROLLBACK, + + /** Device is not NVMe */ + KCAS_ERR_NOT_NVME, + + /** Failed to format NVMe device */ + KCAS_ERR_FORMAT_FAILED, + + /** NVMe is formatted to unsupported format */ + KCAS_ERR_NVME_BAD_FORMAT, + + /** Device contains partitions */ + KCAS_ERR_CONTAINS_PART, + + /** Given device is a partition */ + KCAS_ERR_A_PART, + + /** Core has been removed with flush error */ + KCAS_ERR_REMOVED_DIRTY, + + /** Cache has been stopped, but it may contain dirty data */ + KCAS_ERR_STOPPED_DIRTY, + + /** Core pool is not empty */ + KCAS_ERR_CORE_POOL_NOT_EMPTY, + + /** No caching device is attached */ + KCAS_ERR_NO_CACHE_ATTACHED, + + /** Invalid syntax of classification rule */ + KCAS_ERR_CLS_RULE_INVALID_SYNTAX, + + /** Condition token does not identify any known condition */ + KCAS_ERR_CLS_RULE_UNKNOWN_CONDITION, +}; + +#endif diff --git a/modules/include/cas_version.h b/modules/include/cas_version.h new file mode 100644 index 000000000..2632a7c8b --- /dev/null +++ b/modules/include/cas_version.h @@ -0,0 +1,30 @@ +/* +* Copyright(c) 2012-2019 Intel Corporation +* 
SPDX-License-Identifier: BSD-3-Clause-Clear +*/ + +#ifndef __CAS_VERSION_H__ +#define __CAS_VERSION_H__ + +#if !defined(CAS_BUILD_NO) +#error "You must define build number for version" +#endif + +#define STR_PREP(x) #x +#define PR_STR(x) STR_PREP(x) +#define FMT_VERSION(x) "0" PR_STR(x) + +#ifdef CAS_BUILD_FLAG +#define CAS_VERSION_FLAG "-"CAS_BUILD_FLAG +#else +#define CAS_VERSION_FLAG "" +#endif + +#define CAS_VERSION \ + FMT_VERSION(CAS_VERSION_MAIN) "." \ + FMT_VERSION(CAS_VERSION_MAJOR) "." \ + FMT_VERSION(CAS_VERSION_MINOR) "." \ + CAS_BUILD_NO \ + CAS_VERSION_FLAG + +#endif diff --git a/modules/tags b/modules/tags new file mode 100644 index 000000000..a474c6799 --- /dev/null +++ b/modules/tags @@ -0,0 +1,1422 @@ +!_TAG_FILE_FORMAT 2 /extended format; --format=1 will not append ;" to lines/ +!_TAG_FILE_SORTED 1 /0=unsorted, 1=sorted, 2=foldcase/ +!_TAG_PROGRAM_AUTHOR Darren Hiebert /dhiebert@users.sourceforge.net/ +!_TAG_PROGRAM_NAME Exuberant Ctags // +!_TAG_PROGRAM_URL http://ctags.sourceforge.net /official site/ +!_TAG_PROGRAM_VERSION 5.9~svn20110310 // +ADMIN_TIMEOUT cas_cache/object/obj_atomic_dev_bottom.c 30;" d file: +ATOMIC_DEVICE_OBJECT cas_cache/cas_cache.h /^ ATOMIC_DEVICE_OBJECT, \/**< block device object with atomic$/;" e enum:object_type_t +ATOMIC_METADATA_MODE_ELBA cas_cache/object/obj_atomic_dev_bottom.h /^ ATOMIC_METADATA_MODE_ELBA,$/;" e enum:atomic_metadata_mode +ATOMIC_METADATA_MODE_NONE cas_cache/object/obj_atomic_dev_bottom.h /^ ATOMIC_METADATA_MODE_NONE,$/;" e enum:atomic_metadata_mode +ATOMIC_METADATA_MODE_SEPBUF cas_cache/object/obj_atomic_dev_bottom.h /^ ATOMIC_METADATA_MODE_SEPBUF,$/;" e enum:atomic_metadata_mode +BIO_BIIDX cas_cache/linux_kernel_version.h 257;" d +BIO_BIIDX cas_cache/linux_kernel_version.h 262;" d +BIO_BISECTOR cas_cache/linux_kernel_version.h 258;" d +BIO_BISECTOR cas_cache/linux_kernel_version.h 261;" d +BIO_BISIZE cas_cache/linux_kernel_version.h 256;" d +BIO_BISIZE cas_cache/linux_kernel_version.h 260;" d 
+BIO_ENDIO cas_cache/linux_kernel_version.h 69;" d +BIO_ENDIO cas_cache/linux_kernel_version.h 72;" d +BIO_OP_FLAGS cas_cache/linux_kernel_version.h 110;" d +BIO_OP_FLAGS cas_cache/linux_kernel_version.h 113;" d +BIO_OP_FLAGS_FORMAT cas_cache/linux_kernel_version.h 109;" d +BIO_OP_FLAGS_FORMAT cas_cache/linux_kernel_version.h 112;" d +BIO_OP_STATUS cas_cache/linux_kernel_version.h 63;" d +BIO_OP_STATUS cas_cache/linux_kernel_version.h 65;" d +BIO_RW_FLAGS cas_cache/linux_kernel_version.h 117;" d +BIO_RW_FLAGS cas_cache/linux_kernel_version.h 121;" d +BIO_SET_RW_FLAGS cas_cache/linux_kernel_version.h 119;" d +BIO_SET_RW_FLAGS cas_cache/linux_kernel_version.h 122;" d +BLK_RQ_BYTES cas_cache/object/obj_block_dev_top.c 9;" d file: +BLK_RQ_POS cas_cache/object/obj_block_dev_top.c 8;" d file: +BLOCK_CALLBACK_ERROR cas_cache/linux_kernel_version.h 81;" d +BLOCK_CALLBACK_ERROR cas_cache/linux_kernel_version.h 87;" d +BLOCK_CALLBACK_INIT cas_cache/linux_kernel_version.h 79;" d +BLOCK_CALLBACK_INIT cas_cache/linux_kernel_version.h 85;" d +BLOCK_CALLBACK_RETURN cas_cache/linux_kernel_version.h 80;" d +BLOCK_CALLBACK_RETURN cas_cache/linux_kernel_version.h 86;" d +BLOCK_DEVICE_OBJECT cas_cache/cas_cache.h /^ BLOCK_DEVICE_OBJECT = 1, \/**< block device object *\/$/;" e enum:object_type_t +BYTES_TO_PAGES cas_cache/cas_cache.h 43;" d +CACHE_ID_STR cas_cache/layer_upgrade.c 90;" d file: +CACHE_INIT_LOAD include/cas_ioctl_codes.h 37;" d +CACHE_INIT_NEW include/cas_ioctl_codes.h 36;" d +CACHE_LINE_SIZE_STR cas_cache/layer_upgrade.c 92;" d file: +CACHE_LIST_ID_LIMIT include/cas_ioctl_codes.h 31;" d +CACHE_MODE_STR cas_cache/layer_upgrade.c 94;" d file: +CACHE_MODULE Makefile /^CACHE_MODULE = cas_cache$/;" m +CACHE_PATH_STR cas_cache/layer_upgrade.c 91;" d file: +CACHE_TYPE_STR cas_cache/layer_upgrade.c 93;" d file: +CASDSK_BIO_HANDLED cas_disk/cas_disk.h 18;" d +CASDSK_BIO_NOT_HANDLED cas_disk/cas_disk.h 17;" d +CASDSK_DEBUG cas_disk/debug.h 8;" d +CASDSK_DEBUG_DISK cas_disk/debug.h 
24;" d +CASDSK_DEBUG_DISK cas_disk/debug.h 40;" d +CASDSK_DEBUG_DISK_ERROR cas_disk/debug.h 32;" d +CASDSK_DEBUG_DISK_ERROR cas_disk/debug.h 42;" d +CASDSK_DEBUG_DISK_TRACE cas_disk/debug.h 14;" d +CASDSK_DEBUG_DISK_TRACE cas_disk/debug.h 37;" d +CASDSK_DEBUG_ERROR cas_disk/debug.h 29;" d +CASDSK_DEBUG_ERROR cas_disk/debug.h 41;" d +CASDSK_DEBUG_MSG cas_disk/debug.h 17;" d +CASDSK_DEBUG_MSG cas_disk/debug.h 38;" d +CASDSK_DEBUG_PARAM cas_disk/debug.h 20;" d +CASDSK_DEBUG_PARAM cas_disk/debug.h 39;" d +CASDSK_DEBUG_TRACE cas_disk/debug.h 11;" d +CASDSK_DEBUG_TRACE cas_disk/debug.h 36;" d +CASDSK_DEV_MINORS cas_disk/exp_obj.c 18;" d file: +CASDSK_DISK_OPEN_FMODE cas_disk/disk.c 15;" d file: +CASDSK_IFACE_VERSION cas_disk/cas_disk.h 13;" d +CASDSK_KERN_ALERT cas_disk/cas_disk_defs.h 46;" d +CASDSK_KERN_CRIT cas_disk/cas_disk_defs.h 47;" d +CASDSK_KERN_DEBUG cas_disk/cas_disk_defs.h 52;" d +CASDSK_KERN_EMERG cas_disk/cas_disk_defs.h 45;" d +CASDSK_KERN_ERR cas_disk/cas_disk_defs.h 48;" d +CASDSK_KERN_INFO cas_disk/cas_disk_defs.h 51;" d +CASDSK_KERN_NOTICE cas_disk/cas_disk_defs.h 50;" d +CASDSK_KERN_WARNING cas_disk/cas_disk_defs.h 49;" d +CASDSK_LOGO cas_disk/cas_disk_defs.h 41;" d +CASDSK_MODE_ATTACHED cas_disk/disk.h 17;" d +CASDSK_MODE_PT cas_disk/disk.h 16;" d +CASDSK_MODE_SHUTDOWN cas_disk/disk.h 18;" d +CASDSK_MODE_TRANSITION cas_disk/disk.h 19;" d +CASDSK_MODE_TRANS_TO_ATTACHED cas_disk/disk.h 20;" d +CASDSK_MODE_TRANS_TO_PT cas_disk/disk.h 21;" d +CASDSK_MODE_TRANS_TO_SHUTDOWN cas_disk/disk.h 23;" d +CASDSK_MODE_UNKNOWN cas_disk/disk.h 15;" d +CASDSK_PREFIX_LONG cas_disk/cas_disk_defs.h 43;" d +CASDSK_PREFIX_SHORT cas_disk/cas_disk_defs.h 42;" d +CAS_ALLOC_ALLOCATOR_LIMIT cas_cache/ocf_env.c 11;" d file: +CAS_ALLOC_PAGE_LIMIT cas_cache/context.c 17;" d file: +CAS_BIO_DISCARD cas_cache/linux_kernel_version.h 268;" d +CAS_BIO_DISCARD cas_cache/linux_kernel_version.h 273;" d +CAS_BIO_DISCARD cas_cache/linux_kernel_version.h 278;" d +CAS_BIO_DISCARD 
cas_cache/linux_kernel_version.h 282;" d +CAS_BIO_DISCARD cas_cache/linux_kernel_version.h 285;" d +CAS_BIO_GET_DEV cas_cache/linux_kernel_version.h 536;" d +CAS_BIO_GET_DEV cas_cache/linux_kernel_version.h 543;" d +CAS_BIO_SET_DEV cas_cache/linux_kernel_version.h 535;" d +CAS_BIO_SET_DEV cas_cache/linux_kernel_version.h 542;" d +CAS_BLK_DEV_REQ_TYPE_BIO cas_cache/layer_cache_management.h 8;" d +CAS_BLK_DEV_REQ_TYPE_REQ cas_cache/layer_cache_management.h 9;" d +CAS_BLK_STATUS_T cas_cache/linux_kernel_version.h 562;" d +CAS_BLK_STATUS_T cas_cache/linux_kernel_version.h 568;" d +CAS_CPUS_ALL cas_cache/threads.c 10;" d file: +CAS_DEBUG_IO cas_cache/object/obj_block_dev_bottom.c 8;" d file: +CAS_DEBUG_IO_ATOMIC cas_cache/object/obj_atomic_dev_bottom.c 12;" d file: +CAS_DEBUG_MSG cas_cache/layer_upgrade.c 14;" d file: +CAS_DEBUG_MSG cas_cache/layer_upgrade.c 22;" d file: +CAS_DEBUG_MSG cas_cache/object/obj_atomic_dev_bottom.c 18;" d file: +CAS_DEBUG_MSG cas_cache/object/obj_atomic_dev_bottom.c 26;" d file: +CAS_DEBUG_MSG cas_cache/object/obj_block_dev_bottom.c 14;" d file: +CAS_DEBUG_MSG cas_cache/object/obj_block_dev_bottom.c 21;" d file: +CAS_DEBUG_MSG cas_cache/utils/utils_rpool.c 16;" d file: +CAS_DEBUG_MSG cas_cache/utils/utils_rpool.c 24;" d file: +CAS_DEBUG_PARAM cas_cache/layer_upgrade.c 17;" d file: +CAS_DEBUG_PARAM cas_cache/layer_upgrade.c 23;" d file: +CAS_DEBUG_PARAM cas_cache/object/obj_atomic_dev_bottom.c 21;" d file: +CAS_DEBUG_PARAM cas_cache/object/obj_atomic_dev_bottom.c 27;" d file: +CAS_DEBUG_PARAM cas_cache/object/obj_block_dev_bottom.c 17;" d file: +CAS_DEBUG_PARAM cas_cache/object/obj_block_dev_bottom.c 22;" d file: +CAS_DEBUG_PARAM cas_cache/utils/utils_rpool.c 19;" d file: +CAS_DEBUG_PARAM cas_cache/utils/utils_rpool.c 25;" d file: +CAS_DEBUG_TRACE cas_cache/layer_upgrade.c 11;" d file: +CAS_DEBUG_TRACE cas_cache/layer_upgrade.c 21;" d file: +CAS_DEBUG_TRACE cas_cache/object/obj_atomic_dev_bottom.c 15;" d file: +CAS_DEBUG_TRACE 
cas_cache/object/obj_atomic_dev_bottom.c 25;" d file: +CAS_DEBUG_TRACE cas_cache/object/obj_block_dev_bottom.c 11;" d file: +CAS_DEBUG_TRACE cas_cache/object/obj_block_dev_bottom.c 20;" d file: +CAS_DEBUG_TRACE cas_cache/utils/utils_rpool.c 13;" d file: +CAS_DEBUG_TRACE cas_cache/utils/utils_rpool.c 23;" d file: +CAS_DSS_DATA_DIR cas_cache/cas_cache_dss.c /^ CAS_DSS_DATA_DIR = 7,$/;" e enum:__anon5 file: +CAS_DSS_DATA_DIRECT cas_cache/cas_cache_dss.c /^ CAS_DSS_DATA_DIRECT = 22,$/;" e enum:__anon5 file: +CAS_DSS_DATA_FILE_16KB cas_cache/cas_cache_dss.c /^ CAS_DSS_DATA_FILE_16KB = 12,$/;" e enum:__anon5 file: +CAS_DSS_DATA_FILE_16MB cas_cache/cas_cache_dss.c /^ CAS_DSS_DATA_FILE_16MB = 17,$/;" e enum:__anon5 file: +CAS_DSS_DATA_FILE_1GB cas_cache/cas_cache_dss.c /^ CAS_DSS_DATA_FILE_1GB = 20,$/;" e enum:__anon5 file: +CAS_DSS_DATA_FILE_1MB cas_cache/cas_cache_dss.c /^ CAS_DSS_DATA_FILE_1MB = 15,$/;" e enum:__anon5 file: +CAS_DSS_DATA_FILE_256KB cas_cache/cas_cache_dss.c /^ CAS_DSS_DATA_FILE_256KB = 14,$/;" e enum:__anon5 file: +CAS_DSS_DATA_FILE_256MB cas_cache/cas_cache_dss.c /^ CAS_DSS_DATA_FILE_256MB = 19,$/;" e enum:__anon5 file: +CAS_DSS_DATA_FILE_4KB cas_cache/cas_cache_dss.c /^ CAS_DSS_DATA_FILE_4KB = 11,$/;" e enum:__anon5 file: +CAS_DSS_DATA_FILE_4MB cas_cache/cas_cache_dss.c /^ CAS_DSS_DATA_FILE_4MB = 16,$/;" e enum:__anon5 file: +CAS_DSS_DATA_FILE_64KB cas_cache/cas_cache_dss.c /^ CAS_DSS_DATA_FILE_64KB = 13,$/;" e enum:__anon5 file: +CAS_DSS_DATA_FILE_64MB cas_cache/cas_cache_dss.c /^ CAS_DSS_DATA_FILE_64MB = 18,$/;" e enum:__anon5 file: +CAS_DSS_DATA_FILE_BULK cas_cache/cas_cache_dss.c /^ CAS_DSS_DATA_FILE_BULK = 21,$/;" e enum:__anon5 file: +CAS_DSS_METADATA cas_cache/cas_cache_dss.c /^ CAS_DSS_METADATA = 1,$/;" e enum:__anon5 file: +CAS_DSS_MISC cas_cache/cas_cache_dss.c /^ CAS_DSS_MISC = 23$/;" e enum:__anon5 file: +CAS_DSS_UNCLASSIFIED cas_cache/cas_cache_dss.c /^ CAS_DSS_UNCLASSIFIED = 0,$/;" e enum:__anon5 file: +CAS_FLUSH_SUPPORTED 
cas_cache/linux_kernel_version.h 146;" d +CAS_FLUSH_SUPPORTED cas_cache/linux_kernel_version.h 149;" d +CAS_GARBAGE_COLLECTOR cas_cache/linux_kernel_version.h 470;" d +CAS_IS_DISCARD cas_cache/linux_kernel_version.h 266;" d +CAS_IS_DISCARD cas_cache/linux_kernel_version.h 271;" d +CAS_IS_DISCARD cas_cache/linux_kernel_version.h 276;" d +CAS_IS_DISCARD cas_cache/linux_kernel_version.h 281;" d +CAS_IS_DISCARD cas_cache/linux_kernel_version.h 284;" d +CAS_IS_WRITE_FLUSH cas_cache/linux_kernel_version.h 184;" d +CAS_IS_WRITE_FLUSH cas_cache/linux_kernel_version.h 186;" d +CAS_IS_WRITE_FLUSH cas_cache/linux_kernel_version.h 194;" d +CAS_IS_WRITE_FLUSH cas_cache/linux_kernel_version.h 198;" d +CAS_IS_WRITE_FLUSH_FUA cas_cache/linux_kernel_version.h 206;" d +CAS_IS_WRITE_FLUSH_FUA cas_cache/linux_kernel_version.h 210;" d +CAS_IS_WRITE_FLUSH_FUA cas_cache/linux_kernel_version.h 216;" d +CAS_IS_WRITE_FLUSH_FUA cas_cache/linux_kernel_version.h 221;" d +CAS_IS_WRITE_FUA cas_cache/linux_kernel_version.h 227;" d +CAS_IS_WRITE_FUA cas_cache/linux_kernel_version.h 229;" d +CAS_IS_WRITE_FUA cas_cache/linux_kernel_version.h 233;" d +CAS_IS_WRITE_FUA cas_cache/linux_kernel_version.h 236;" d +CAS_KERN_ALERT cas_cache/cas_cache.h 27;" d +CAS_KERN_CRIT cas_cache/cas_cache.h 28;" d +CAS_KERN_DEBUG cas_cache/cas_cache.h 33;" d +CAS_KERN_EMERG cas_cache/cas_cache.h 26;" d +CAS_KERN_ERR cas_cache/cas_cache.h 29;" d +CAS_KERN_INFO cas_cache/cas_cache.h 32;" d +CAS_KERN_NOTICE cas_cache/cas_cache.h 31;" d +CAS_KERN_WARNING cas_cache/cas_cache.h 30;" d +CAS_LOG_BURST_LIMIT cas_cache/context.c 22;" d file: +CAS_LOG_RATELIMIT cas_cache/context.c 20;" d file: +CAS_METADATA_MODE_ATOMIC include/cas_ioctl_codes.h 291;" d +CAS_METADATA_MODE_INVALID include/cas_ioctl_codes.h 292;" d +CAS_METADATA_MODE_NORMAL include/cas_ioctl_codes.h 290;" d +CAS_PRINT_RL cas_cache/linux_kernel_version.h 474;" d +CAS_PROPERTIES_CONST cas_cache/utils/utils_properties.h 24;" d +CAS_PROPERTIES_NON_CONST 
cas_cache/utils/utils_properties.h 23;" d +CAS_PROPERTIES_VERSION cas_cache/utils/utils_properties.c 11;" d file: +CAS_PROP_CHECK_CONST cas_cache/utils/utils_properties.c 23;" d file: +CAS_PROP_UNCONST cas_cache/utils/utils_properties.c 22;" d file: +CAS_RATELIMIT cas_cache/linux_kernel_version.h 547;" d +CAS_RATELIMIT cas_cache/linux_kernel_version.h 549;" d +CAS_REQ_FLUSH cas_cache/linux_kernel_version.h 145;" d +CAS_REQ_FLUSH cas_cache/linux_kernel_version.h 148;" d +CAS_RHEL_73 cas_cache/linux_kernel_version.h 346;" d +CAS_RPOOL_MIN_SIZE_ITEM cas_cache/utils/utils_rpool.h 9;" d +CAS_SET_DISCARD_ZEROES_DATA cas_cache/linux_kernel_version.h 592;" d +CAS_SET_DISCARD_ZEROES_DATA cas_cache/linux_kernel_version.h 594;" d +CAS_UBUNTU cas_cache/linux_kernel_version.h 48;" d +CAS_UPGRADE_DEBUG cas_cache/layer_upgrade.c 8;" d file: +CAS_UPGRADE_IFACE_CURRENT_VERSION cas_cache/layer_upgrade.c 119;" d file: +CAS_UPGRADE_IFACE_VERSION_19_03_00 cas_cache/layer_upgrade.c 118;" d file: +CAS_UTILS_RPOOL_DEBUG cas_cache/utils/utils_rpool.c 11;" d file: +CAS_VERSION cas_cache/main.c /^MODULE_VERSION(CAS_VERSION);$/;" v +CAS_VERSION cas_disk/main.c /^MODULE_VERSION(CAS_VERSION);$/;" v +CAS_VERSION include/cas_version.h 23;" d +CAS_VERSION_FLAG include/cas_version.h 18;" d +CAS_VERSION_FLAG include/cas_version.h 20;" d +CHECK_BARRIER cas_cache/linux_kernel_version.h 126;" d +CHECK_BARRIER cas_cache/linux_kernel_version.h 128;" d +CHECK_BARRIER cas_cache/linux_kernel_version.h 130;" d +CHECK_QUEUE_FLUSH cas_cache/linux_kernel_version.h 153;" d +CHECK_QUEUE_FLUSH cas_cache/linux_kernel_version.h 163;" d +CHECK_QUEUE_FUA cas_cache/linux_kernel_version.h 154;" d +CHECK_QUEUE_FUA cas_cache/linux_kernel_version.h 164;" d +CLEANING_ACP_MAX_BUFFERS_STR cas_cache/layer_upgrade.c 108;" d file: +CLEANING_ACP_WAKEUP_TIME_STR cas_cache/layer_upgrade.c 107;" d file: +CLEANING_ALRU_MAX_BUFFERS_STR cas_cache/layer_upgrade.c 105;" d file: +CLEANING_ALRU_STALENESS_TIME_STR cas_cache/layer_upgrade.c 
104;" d file: +CLEANING_ALRU_TRESHOLD_STR cas_cache/layer_upgrade.c 106;" d file: +CLEANING_ALRU_WAKEUP_TIME_STR cas_cache/layer_upgrade.c 103;" d file: +CLEANING_POLICY_STR cas_cache/layer_upgrade.c 102;" d file: +CLOSE_BDEV_EXCLUSIVE cas_cache/linux_kernel_version.h 93;" d +CLOSE_BDEV_EXCLUSIVE cas_cache/linux_kernel_version.h 98;" d +CONFIG_STACK_VALIDATION config.mk /^CONFIG_STACK_VALIDATION=$/;" m +CORE_ID_STR cas_cache/layer_upgrade.c 97;" d file: +CORE_NO_STR cas_cache/layer_upgrade.c 96;" d file: +CORE_PATH_STR cas_cache/layer_upgrade.c 98;" d file: +CORE_SEQ_CUTOFF_POLICY_STR cas_cache/layer_upgrade.c 100;" d file: +CORE_SEQ_CUTOFF_THRESHOLD_STR cas_cache/layer_upgrade.c 99;" d file: +DAEMONIZE cas_cache/linux_kernel_version.h 243;" d +DAEMONIZE cas_cache/linux_kernel_version.h 245;" d +DECLARE_BLOCK_CALLBACK cas_cache/linux_kernel_version.h 77;" d +DECLARE_BLOCK_CALLBACK cas_cache/linux_kernel_version.h 83;" d +DECLARE_BLOCK_CALLBACK cas_cache/object/obj_atomic_dev_bottom.c /^static DECLARE_BLOCK_CALLBACK(cas_atomic_fire_atom, struct bio *bio,$/;" f file: +DECLARE_BLOCK_CALLBACK cas_cache/object/obj_block_dev_bottom.c /^DECLARE_BLOCK_CALLBACK(cas_bd_io_end, struct bio *bio,$/;" f +DECLARE_BLOCK_CALLBACK cas_disk/exp_obj.c /^DECLARE_BLOCK_CALLBACK(_casdsk_exp_obj_bio_pt_io, struct bio *bio,$/;" f +DEPMOD Makefile /^DEPMOD:=$(shell which depmod)$/;" m +DISK_MODULE Makefile /^DISK_MODULE = cas_disk$/;" m +ENOTSUP cas_cache/linux_kernel_version.h 342;" d +ENV_ALLOCATOR_NAME_MAX cas_cache/ocf_env.c 87;" d file: +ENV_BUG cas_cache/ocf_env.h 581;" d +ENV_BUG_ON cas_cache/ocf_env.h 582;" d +ENV_MEM_ATOMIC cas_cache/ocf_env.h 21;" d +ENV_MEM_NOIO cas_cache/ocf_env.h 20;" d +ENV_MEM_NORMAL cas_cache/ocf_env.h 19;" d +ENV_PRIu64 cas_cache/ocf_env.h 576;" d +ENV_SECTOR_SHIFT cas_cache/ocf_env.h 15;" d +ENV_WARN cas_cache/ocf_env.h 578;" d +ENV_WARN_ON cas_cache/ocf_env.h 579;" d +EXTERNAL_CALL cas_cache/utils/utils_properties.c 9;" d file: +FILE_INODE 
cas_cache/linux_kernel_version.h 57;" d +FILE_INODE cas_cache/linux_kernel_version.h 59;" d +FMT_VERSION include/cas_version.h 15;" d +FORMAT_WORKAROUND_NEED cas_cache/utils/utils_nvme.c 104;" d file: +FORMAT_WORKAROUND_NOT_NEED cas_cache/utils/utils_nvme.c 103;" d file: +GET_CMD_INFO cas_cache/service_ui_ioctl.c 79;" d file: +INCDIR config.mk /^INCDIR = ""$/;" m +INCDIR config.mk /^INCDIR = $(PWD)\/include$/;" m +INTERNAL_CALL cas_cache/utils/utils_properties.c 8;" d file: +INVALID_FLUSH_PARAM include/cas_ioctl_codes.h 33;" d +IO_CLASS_CACHE_MODE_STR cas_cache/layer_upgrade.c 116;" d file: +IO_CLASS_ID_STR cas_cache/layer_upgrade.c 113;" d file: +IO_CLASS_MAX_STR cas_cache/layer_upgrade.c 114;" d file: +IO_CLASS_MIN_STR cas_cache/layer_upgrade.c 112;" d file: +IO_CLASS_NAME_STR cas_cache/layer_upgrade.c 111;" d file: +IO_CLASS_NO_STR cas_cache/layer_upgrade.c 110;" d file: +IO_CLASS_PRIO_STR cas_cache/layer_upgrade.c 115;" d file: +KCAS_ERR_A_PART include/cas_ioctl_codes.h /^ KCAS_ERR_A_PART,$/;" e enum:kcas_error +KCAS_ERR_BAD_RANGE include/cas_ioctl_codes.h /^ KCAS_ERR_BAD_RANGE,$/;" e enum:kcas_error +KCAS_ERR_CONTAINS_PART include/cas_ioctl_codes.h /^ KCAS_ERR_CONTAINS_PART,$/;" e enum:kcas_error +KCAS_ERR_CORE_POOL_NOT_EMPTY include/cas_ioctl_codes.h /^ KCAS_ERR_CORE_POOL_NOT_EMPTY,$/;" e enum:kcas_error +KCAS_ERR_DEV_PENDING include/cas_ioctl_codes.h /^ KCAS_ERR_DEV_PENDING,$/;" e enum:kcas_error +KCAS_ERR_DEV_SPACE include/cas_ioctl_codes.h /^ KCAS_ERR_DEV_SPACE,$/;" e enum:kcas_error +KCAS_ERR_DIRTY_EXISTS_NVME include/cas_ioctl_codes.h /^ KCAS_ERR_DIRTY_EXISTS_NVME,$/;" e enum:kcas_error +KCAS_ERR_FILE_EXISTS include/cas_ioctl_codes.h /^ KCAS_ERR_FILE_EXISTS,$/;" e enum:kcas_error +KCAS_ERR_FORMAT_FAILED include/cas_ioctl_codes.h /^ KCAS_ERR_FORMAT_FAILED,$/;" e enum:kcas_error +KCAS_ERR_INV_IOCTL include/cas_ioctl_codes.h /^ KCAS_ERR_INV_IOCTL,$/;" e enum:kcas_error +KCAS_ERR_IN_UPGRADE include/cas_ioctl_codes.h /^ KCAS_ERR_IN_UPGRADE,$/;" e 
enum:kcas_error +KCAS_ERR_NOT_NVME include/cas_ioctl_codes.h /^ KCAS_ERR_NOT_NVME,$/;" e enum:kcas_error +KCAS_ERR_NO_CACHE_ATTACHED include/cas_ioctl_codes.h /^ KCAS_ERR_NO_CACHE_ATTACHED,$/;" e enum:kcas_error +KCAS_ERR_NO_STORED_CONF include/cas_ioctl_codes.h /^ KCAS_ERR_NO_STORED_CONF,$/;" e enum:kcas_error +KCAS_ERR_NVME_BAD_FORMAT include/cas_ioctl_codes.h /^ KCAS_ERR_NVME_BAD_FORMAT,$/;" e enum:kcas_error +KCAS_ERR_REMOVED_DIRTY include/cas_ioctl_codes.h /^ KCAS_ERR_REMOVED_DIRTY,$/;" e enum:kcas_error +KCAS_ERR_ROLLBACK include/cas_ioctl_codes.h /^ KCAS_ERR_ROLLBACK,$/;" e enum:kcas_error +KCAS_ERR_ROOT include/cas_ioctl_codes.h /^ KCAS_ERR_ROOT = 2000000,$/;" e enum:kcas_error +KCAS_ERR_STOPPED_DIRTY include/cas_ioctl_codes.h /^ KCAS_ERR_STOPPED_DIRTY,$/;" e enum:kcas_error +KCAS_ERR_SYSTEM include/cas_ioctl_codes.h /^ KCAS_ERR_SYSTEM,$/;" e enum:kcas_error +KCAS_ERR_UNALIGNED include/cas_ioctl_codes.h /^ KCAS_ERR_UNALIGNED,$/;" e enum:kcas_error +KCAS_IOCTL_CACHE_CHECK_DEVICE include/cas_ioctl_codes.h 666;" d +KCAS_IOCTL_CACHE_INFO include/cas_ioctl_codes.h 611;" d +KCAS_IOCTL_CORE_INFO include/cas_ioctl_codes.h 622;" d +KCAS_IOCTL_CORE_POOL_REMOVE include/cas_ioctl_codes.h 655;" d +KCAS_IOCTL_FLUSH_CACHE include/cas_ioctl_codes.h 457;" d +KCAS_IOCTL_FLUSH_CORE include/cas_ioctl_codes.h 487;" d +KCAS_IOCTL_GET_CACHE_COUNT include/cas_ioctl_codes.h 521;" d +KCAS_IOCTL_GET_CACHE_PARAM include/cas_ioctl_codes.h 710;" d +KCAS_IOCTL_GET_CAPABILITIES include/cas_ioctl_codes.h 543;" d +KCAS_IOCTL_GET_CORE_PARAM include/cas_ioctl_codes.h 688;" d +KCAS_IOCTL_GET_CORE_POOL_COUNT include/cas_ioctl_codes.h 633;" d +KCAS_IOCTL_GET_CORE_POOL_PATHS include/cas_ioctl_codes.h 644;" d +KCAS_IOCTL_INSERT_CORE include/cas_ioctl_codes.h 589;" d +KCAS_IOCTL_INTERRUPT_FLUSHING include/cas_ioctl_codes.h 475;" d +KCAS_IOCTL_LIST_CACHE include/cas_ioctl_codes.h 532;" d +KCAS_IOCTL_MAGIC include/cas_ioctl_codes.h 409;" d +KCAS_IOCTL_NVME_FORMAT include/cas_ioctl_codes.h 566;" d 
+KCAS_IOCTL_PARTITION_SET include/cas_ioctl_codes.h 510;" d +KCAS_IOCTL_PARTITION_STATS include/cas_ioctl_codes.h 499;" d +KCAS_IOCTL_REMOVE_CORE include/cas_ioctl_codes.h 600;" d +KCAS_IOCTL_RESET_STATS include/cas_ioctl_codes.h 445;" d +KCAS_IOCTL_SET_CACHE_PARAM include/cas_ioctl_codes.h 699;" d +KCAS_IOCTL_SET_CACHE_STATE include/cas_ioctl_codes.h 433;" d +KCAS_IOCTL_SET_CORE_PARAM include/cas_ioctl_codes.h 677;" d +KCAS_IOCTL_START_CACHE include/cas_ioctl_codes.h 577;" d +KCAS_IOCTL_STOP_CACHE include/cas_ioctl_codes.h 421;" d +KCAS_IOCTL_UPGRADE include/cas_ioctl_codes.h 555;" d +KERNEL_DIR Makefile /^KERNEL_DIR ?= "\/lib\/modules\/$(shell uname -r)\/build"$/;" m +KERNEL_MAJOR config.mk /^KERNEL_MAJOR = $(shell echo $(KERNELRELEASE) | cut -d'.' -f2)$/;" m +KERNEL_VERSION Makefile /^KERNEL_VERSION := $(shell uname -r)$/;" m +KERNEL_VERSION config.mk /^KERNEL_VERSION = $(shell echo $(KERNELRELEASE) | cut -d'.' -f1)$/;" m +KMEM_CACHE_MIN_SIZE cas_disk/exp_obj.c 19;" d file: +KRETURN cas_disk/cas_disk_defs.h 81;" d +KRETURN cas_disk/cas_disk_defs.h 84;" d +KRETURN cas_disk/cas_disk_defs.h 87;" d +LIST_FIRST_ITEM cas_cache/utils/utils_rpool.c 199;" d file: +LOOKUP_BDEV cas_cache/linux_kernel_version.h 103;" d +LOOKUP_BDEV cas_cache/linux_kernel_version.h 105;" d +MAKE_RQ_RET_TYPE cas_disk/cas_disk_defs.h 82;" d +MAKE_RQ_RET_TYPE cas_disk/cas_disk_defs.h 85;" d +MAKE_RQ_RET_TYPE cas_disk/cas_disk_defs.h 88;" d +MAX_ELEVATOR_NAME include/cas_ioctl_codes.h 28;" d +MAX_LINES_PER_IO cas_cache/cas_cache.h 46;" d +MAX_STRING_SIZE cas_cache/utils/utils_properties.h 21;" d +MAX_STR_LEN include/cas_ioctl_codes.h 23;" d +MAX_THREAD_NAME_SIZE cas_cache/threads.c 8;" d file: +MODPROBE Makefile /^MODPROBE:=$(shell which modprobe)$/;" m +MODULES_DIR Makefile /^MODULES_DIR=\/lib\/modules\/$(KERNEL_VERSION)\/extra$/;" m +NVME_CONTROLLER cas_cache/cas_cache.h /^ NVME_CONTROLLER$/;" e enum:object_type_t +NVME_FULL config.mk /^NVME_FULL = 0$/;" m +NVME_FULL config.mk /^NVME_FULL = 
1$/;" m +NVME_ID_CNS_CTRL cas_cache/utils/utils_nvme.c 38;" d file: +NVME_ID_CNS_NS cas_cache/utils/utils_nvme.c 37;" d file: +OBJECT_TYPE_MAX cas_cache/cas_cache.h /^ OBJECT_TYPE_MAX,$/;" e enum:object_type_t +OBJTOOL config.mk /^OBJTOOL=$(shell [ -f tools\/objtool\/objtool ] && echo "y")$/;" m +OCFDIR Makefile /^OCFDIR=$(PWD)\/..\/ocf$/;" m +OCF_LOGO cas_cache/ocf_env_headers.h 13;" d +OCF_PREFIX_LONG cas_cache/ocf_env_headers.h 15;" d +OCF_PREFIX_SHORT cas_cache/ocf_env_headers.h 14;" d +OCF_VERSION_MAIN cas_cache/ocf_env_headers.h 17;" d +OCF_VERSION_MAJOR cas_cache/ocf_env_headers.h 18;" d +OCF_VERSION_MINOR cas_cache/ocf_env_headers.h 19;" d +OCF_WRITE_FLUSH cas_cache/linux_kernel_version.h 190;" d +OCF_WRITE_FLUSH cas_cache/linux_kernel_version.h 193;" d +OCF_WRITE_FLUSH cas_cache/linux_kernel_version.h 200;" d +OCF_WRITE_FLUSH_FUA cas_cache/linux_kernel_version.h 204;" d +OCF_WRITE_FLUSH_FUA cas_cache/linux_kernel_version.h 219;" d +OCF_WRITE_FLUSH_FUA cas_cache/linux_kernel_version.h 222;" d +OCF_WRITE_FUA cas_cache/linux_kernel_version.h 231;" d +OCF_WRITE_FUA cas_cache/linux_kernel_version.h 234;" d +OCF_WRITE_FUA cas_cache/linux_kernel_version.h 237;" d +OPEN_BDEV_EXCLUSIVE cas_cache/linux_kernel_version.h 91;" d +OPEN_BDEV_EXCLUSIVE cas_cache/linux_kernel_version.h 96;" d +PAGES_TO_BYTES cas_cache/cas_cache.h 44;" d +PG_cas cas_cache/context.c 18;" d file: +PR_STR include/cas_version.h 14;" d +PWD Makefile /^PWD=$(shell pwd)$/;" m +REFER_BLOCK_CALLBACK cas_cache/linux_kernel_version.h 75;" d +RETURN_CMD_RESULT cas_cache/service_ui_ioctl.c 90;" d file: +RMMOD Makefile /^RMMOD :=$(shell which rmmod)$/;" m +RPOOL_ENTRY_TO_ITEM cas_cache/utils/utils_rpool.c 54;" d file: +RPOOL_ITEM_TO_ENTRY cas_cache/utils/utils_rpool.c 50;" d file: +RQ_CHECK_CONTINOUS cas_cache/linux_kernel_version.h 325;" d +RQ_DATA_DIR cas_cache/linux_kernel_version.h 134;" d +RQ_DATA_DIR cas_cache/linux_kernel_version.h 137;" d +RQ_DATA_DIR cas_cache/linux_kernel_version.h 140;" d 
+RQ_DATA_DIR_WR cas_cache/linux_kernel_version.h 135;" d +RQ_DATA_DIR_WR cas_cache/linux_kernel_version.h 138;" d +RQ_DATA_DIR_WR cas_cache/linux_kernel_version.h 141;" d +RQ_IS_FLUSH cas_cache/linux_kernel_version.h 182;" d +RQ_IS_FLUSH cas_cache/linux_kernel_version.h 192;" d +RQ_IS_FLUSH cas_cache/linux_kernel_version.h 197;" d +SECTOR_SHIFT cas_cache/cas_cache.h 36;" d +SECTOR_SIZE cas_cache/cas_cache.h 40;" d +SEGMENT_BVEC cas_cache/linux_kernel_version.h 329;" d +SEGMENT_BVEC cas_cache/linux_kernel_version.h 331;" d +SET_QUEUE_CHUNK_SECTORS cas_cache/linux_kernel_version.h 249;" d +SET_QUEUE_CHUNK_SECTORS cas_cache/linux_kernel_version.h 252;" d +SHRT_MAX cas_cache/linux_kernel_version.h 339;" d +SHRT_MIN cas_cache/linux_kernel_version.h 335;" d +SLES config.mk /^SLES ?= $(shell cat \/etc\/SuSE-release 2>\/dev\/null)$/;" m +SLES_VERSION config.mk /^SLES_VERSION := $(shell cat \/etc\/os-release |\\$/;" m +STR_PREP include/cas_version.h 13;" d +UPGRADE_IFACE_VERSION_STR cas_cache/layer_upgrade.c 88;" d file: +UTILS_BLK_H_ cas_cache/utils/utils_blk.h 7;" d +UTILS_DATA_H_ cas_cache/utils/utils_data.h 7;" d +UTILS_GC_H_ cas_cache/utils/utils_gc.h 7;" d +UTILS_NVME_H_ cas_cache/utils/utils_nvme.h 7;" d +UTILS_PROPERTIES_H_ cas_cache/utils/utils_properties.h 7;" d +VERSION_FILE Makefile /^VERSION_FILE=$(PWD)\/CAS_VERSION$/;" m +VERSION_FILE config.mk /^VERSION_FILE := $(M)\/CAS_VERSION$/;" m +VERSION_STR cas_cache/utils/utils_properties.c 13;" d file: +__CASDISK_DEBUG_H__ cas_disk/debug.h 6;" d +__CASDISK_DEFS_H__ cas_disk/cas_disk_defs.h 6;" d +__CASDISK_DISK_H__ cas_disk/disk.h 6;" d +__CASDISK_EXP_OBJ_H__ cas_disk/exp_obj.h 6;" d +__CASDISK_H__ cas_disk/cas_disk.h 6;" d +__CASDISK_SYSFS_H__ cas_disk/sysfs.h 6;" d +__CAS_CACHE_DSS_H__ cas_cache/cas_cache_dss.h 6;" d +__CAS_CACHE_H__ cas_cache/cas_cache.h 7;" d +__CAS_CONTROL_H__ cas_cache/control.h 6;" d +__CAS_IOCTL_CODES_H__ include/cas_ioctl_codes.h 7;" d +__CAS_RPOOL_H__ cas_cache/utils/utils_rpool.h 7;" d 
+__CAS_UTILS_H__ cas_cache/utils/cas_cache_utils.h 8;" d +__CAS_VERSION_H__ include/cas_version.h 7;" d +__CONTEXT_H__ cas_cache/context.h 8;" d +__LAYER_CACHE_MANAGEMENT_H__ cas_cache/layer_cache_management.h 6;" d +__LAYER_UPGRADE_H cas_cache/layer_upgrade.h 8;" d +__LINUX_KERNEL_VERSION_H__ cas_cache/linux_kernel_version.h 7;" d +__OBJ_ATOMIC_DEV_BOTTOM_H__ cas_cache/object/obj_atomic_dev_bottom.h 7;" d +__OBJ_BLK_H__ cas_cache/object/obj_blk.h 7;" d +__OBJ_BLK_UTILS_H__ cas_cache/object/obj_blk_utils.h 7;" d +__OBJ_BLOCK_DEV_BOTTOM_H__ cas_cache/object/obj_block_dev_bottom.h 7;" d +__OBJ_BLOCK_DEV_TOP_H__ cas_cache/object/obj_block_dev_top.h 7;" d +__OCF_ENV_HEADERS_H__ cas_cache/ocf_env_headers.h 8;" d +__OCF_ENV_H__ cas_cache/ocf_env.h 8;" d +__SERVICE_UI_IOCTL_H__ cas_cache/service_ui_ioctl.h 8;" d +__THREADS_H__ cas_cache/threads.h 8;" d +__block_dev_get_elevator_name cas_cache/object/obj_block_dev_bottom.c /^static char *__block_dev_get_elevator_name(struct request_queue *q)$/;" f file: +__blockdev_end_request_all cas_cache/object/obj_block_dev_top.c /^static inline void __blockdev_end_request_all(struct request *rq, int error)$/;" f file: +__cas_ctx_data_alloc cas_cache/context.c /^ctx_data_t *__cas_ctx_data_alloc(uint32_t pages, bool zalloc)$/;" f +__cas_nvme_check_fw cas_cache/utils/utils_nvme.c /^static int __cas_nvme_check_fw(struct nvme_id_ctrl *id_ctrl)$/;" f file: +__casdsk_disk_attach cas_disk/disk.c /^static inline int __casdsk_disk_attach(struct casdsk_disk *dsk,$/;" f file: +__casdsk_disk_clear_pt cas_disk/disk.c /^static inline int __casdsk_disk_clear_pt(struct casdsk_disk *dsk)$/;" f file: +__casdsk_disk_close cas_disk/disk.c /^static void __casdsk_disk_close(struct casdsk_disk *dsk)$/;" f file: +__casdsk_disk_dettach cas_disk/disk.c /^static inline int __casdsk_disk_dettach(struct casdsk_disk *dsk)$/;" f file: +__casdsk_disk_set_attached cas_disk/disk.c /^static inline int __casdsk_disk_set_attached(struct casdsk_disk *dsk)$/;" f file: 
+__casdsk_disk_set_pt cas_disk/disk.c /^static inline int __casdsk_disk_set_pt(struct casdsk_disk *dsk)$/;" f file: +__casdsk_exp_obj_release cas_disk/exp_obj.c /^static void __casdsk_exp_obj_release(struct casdsk_exp_obj *exp_obj)$/;" f file: +_blkdev_can_hndl_bio cas_cache/object/obj_block_dev_top.c /^static inline int _blkdev_can_hndl_bio(struct bio *bio)$/;" f file: +_blkdev_handle_flush_request cas_cache/object/obj_block_dev_top.c /^static int _blkdev_handle_flush_request(struct request *rq, ocf_core_t core)$/;" f file: +_blkdev_handle_request cas_cache/object/obj_block_dev_top.c /^static int _blkdev_handle_request(struct request *rq, ocf_core_t core)$/;" f file: +_blkdev_is_flush_fua_bio cas_cache/object/obj_block_dev_top.c /^static inline bool _blkdev_is_flush_fua_bio(struct bio *bio)$/;" f file: +_blkdev_scan_request cas_cache/object/obj_block_dev_top.c /^static uint32_t _blkdev_scan_request(ocf_cache_t cache, struct request *rq,$/;" f file: +_block_dev_lock_exported_object cas_cache/object/obj_block_dev_top.c /^static int _block_dev_lock_exported_object(ocf_core_t core, void *cntx)$/;" f file: +_block_dev_stop_exported_object cas_cache/object/obj_block_dev_top.c /^static int _block_dev_stop_exported_object(ocf_core_t core, void *cntx)$/;" f file: +_block_dev_unlock_exported_object cas_cache/object/obj_block_dev_top.c /^static int _block_dev_unlock_exported_object(ocf_core_t core, void *cntx)$/;" f file: +_blockdev_alloc_many_requests cas_cache/object/obj_block_dev_top.c /^static int _blockdev_alloc_many_requests(ocf_core_t core,$/;" f file: +_blockdev_calc_discard_alignment cas_cache/object/obj_block_dev_top.c /^static int _blockdev_calc_discard_alignment(ocf_cache_t cache,$/;" f file: +_blockdev_can_handle_rq cas_cache/object/obj_block_dev_top.c /^static inline bool _blockdev_can_handle_rq(struct request *rq)$/;" f file: +_blockdev_end_io_acct cas_cache/object/obj_block_dev_top.c /^static inline void _blockdev_end_io_acct(struct bio *bio,$/;" f file: 
+_blockdev_end_request_all cas_cache/object/obj_block_dev_top.c /^static inline void _blockdev_end_request_all(struct request *rq, int error)$/;" f file: +_blockdev_exp_obj_ops cas_cache/object/obj_block_dev_top.c /^static struct casdsk_exp_obj_ops _blockdev_exp_obj_ops = {$/;" v typeref:struct:casdsk_exp_obj_ops file: +_blockdev_is_elevator_inited cas_cache/object/obj_block_dev_top.c /^static inline bool _blockdev_is_elevator_inited(struct request_queue *q)$/;" f file: +_blockdev_is_request_barier cas_cache/object/obj_block_dev_top.c /^bool _blockdev_is_request_barier(struct request *rq)$/;" f +_blockdev_make_request_discard cas_cache/object/obj_block_dev_top.c /^static void _blockdev_make_request_discard(struct casdsk_disk *dsk,$/;" f file: +_blockdev_make_request_fast cas_cache/object/obj_block_dev_top.c /^static int _blockdev_make_request_fast(struct casdsk_disk *dsk,$/;" f file: +_blockdev_peek_request cas_cache/object/obj_block_dev_top.c /^static inline struct request *_blockdev_peek_request(struct request_queue *q)$/;" f file: +_blockdev_prep_rq_fn cas_cache/object/obj_block_dev_top.c /^static int _blockdev_prep_rq_fn(struct casdsk_disk *dsk, struct request_queue *q,$/;" f file: +_blockdev_prepare_queue cas_cache/object/obj_block_dev_top.c /^static int _blockdev_prepare_queue(struct casdsk_disk *dsk,$/;" f file: +_blockdev_request_fn cas_cache/object/obj_block_dev_top.c /^static void _blockdev_request_fn(struct casdsk_disk *dsk, struct request_queue *q,$/;" f file: +_blockdev_set_bio_data cas_cache/object/obj_block_dev_top.c /^static void _blockdev_set_bio_data(struct blk_data *data, struct bio *bio)$/;" f file: +_blockdev_set_discard_properties cas_cache/object/obj_block_dev_top.c /^static void _blockdev_set_discard_properties(ocf_cache_t cache,$/;" f file: +_blockdev_set_exported_object_flush_fua cas_cache/object/obj_block_dev_top.c /^void _blockdev_set_exported_object_flush_fua(ocf_core_t core)$/;" f +_blockdev_set_geometry 
cas_cache/object/obj_block_dev_top.c /^static int _blockdev_set_geometry(struct casdsk_disk *dsk, void *private)$/;" f file: +_blockdev_set_request_data cas_cache/object/obj_block_dev_top.c /^static void _blockdev_set_request_data(struct blk_data *data, struct request *rq)$/;" f file: +_blockdev_start_io_acct cas_cache/object/obj_block_dev_top.c /^static inline void _blockdev_start_io_acct(struct bio *bio)$/;" f file: +_blockdev_start_request cas_cache/object/obj_block_dev_top.c /^static inline void _blockdev_start_request(struct request *rq)$/;" f file: +_bvec_is_mergeable cas_cache/object/obj_block_dev_top.c /^static inline bool _bvec_is_mergeable(struct bio_vec *bv1, struct bio_vec *bv2)$/;" f file: +_cache_mng_core_pool_get_paths_visitor cas_cache/layer_cache_management.c /^int _cache_mng_core_pool_get_paths_visitor(ocf_uuid_t uuid, void *ctx)$/;" f +_cache_mng_create_exported_object cas_cache/layer_cache_management.c /^static int _cache_mng_create_exported_object(ocf_core_t core, void *cntx)$/;" f file: +_cache_mng_destroy_exported_object cas_cache/layer_cache_management.c /^static int _cache_mng_destroy_exported_object(ocf_core_t core, void *cntx)$/;" f file: +_cache_mng_load cas_cache/layer_cache_management.c /^static int _cache_mng_load(struct ocf_mngt_cache_config *cfg,$/;" f file: +_cache_mng_log_cache_device_path cas_cache/layer_cache_management.c /^static void _cache_mng_log_cache_device_path(ocf_cache_t cache,$/;" f file: +_cache_mng_log_core_device_path cas_cache/layer_cache_management.c /^static void _cache_mng_log_core_device_path(ocf_core_t core)$/;" f file: +_cache_mng_log_core_device_path_visitor cas_cache/layer_cache_management.c /^static int _cache_mng_log_core_device_path_visitor(ocf_core_t core, void *cntx)$/;" f file: +_cache_mng_remove_core_prepare cas_cache/layer_cache_management.c /^int _cache_mng_remove_core_prepare(ocf_cache_t cache, ocf_core_t core,$/;" f +_cache_mng_start cas_cache/layer_cache_management.c /^static int 
_cache_mng_start(struct ocf_mngt_cache_config *cfg,$/;" f file: +_cache_mng_start_queues cas_cache/layer_cache_management.c /^static int _cache_mng_start_queues(ocf_cache_t cache)$/;" f file: +_cas_alloc_page_rpool cas_cache/context.c /^void *_cas_alloc_page_rpool(void *allocator_ctx, int cpu)$/;" f +_cas_atomic_setup_cmd cas_cache/object/obj_atomic_dev_bottom.c /^static void _cas_atomic_setup_cmd($/;" f file: +_cas_atomic_write_zeroes_end cas_cache/object/obj_atomic_dev_bottom.c /^static void _cas_atomic_write_zeroes_end(struct cas_atomic_write_zero_ctx *ctx,$/;" f file: +_cas_atomic_write_zeroes_step_cmpl cas_cache/object/obj_atomic_dev_bottom.c /^static void _cas_atomic_write_zeroes_step_cmpl(struct ocf_io *io, int error)$/;" f file: +_cas_atomic_write_zeroes_work cas_cache/object/obj_atomic_dev_bottom.c /^void _cas_atomic_write_zeroes_work(struct work_struct *work)$/;" f +_cas_blk_identify_type cas_cache/object/obj_blk_utils.c /^int _cas_blk_identify_type(const char *path, uint8_t *type,$/;" f +_cas_cache_attached_check_visitor cas_cache/layer_upgrade.c /^static int _cas_cache_attached_check_visitor(ocf_cache_t cache, void *cntx)$/;" f file: +_cas_cache_dump_conf_visitor_ctx cas_cache/layer_upgrade.c /^struct _cas_cache_dump_conf_visitor_ctx {$/;" s file: +_cas_cleaner_complete cas_cache/threads.c /^static void _cas_cleaner_complete(ocf_cleaner_t c, uint32_t interval)$/;" f file: +_cas_cleaner_thread cas_cache/threads.c /^static int _cas_cleaner_thread(void *data)$/;" f file: +_cas_create_thread cas_cache/threads.c /^static int _cas_create_thread(struct cas_thread_info **pinfo,$/;" f file: +_cas_ctx_cleaner_init cas_cache/context.c /^static int _cas_ctx_cleaner_init(ocf_cleaner_t c)$/;" f file: +_cas_ctx_cleaner_stop cas_cache/context.c /^static void _cas_ctx_cleaner_stop(ocf_cleaner_t c)$/;" f file: +_cas_ctx_data_copy cas_cache/context.c /^static uint64_t _cas_ctx_data_copy(ctx_data_t *dst, ctx_data_t *src,$/;" f file: +_cas_ctx_data_mlock cas_cache/context.c 
/^static int _cas_ctx_data_mlock(ctx_data_t *ctx_data)$/;" f file: +_cas_ctx_data_munlock cas_cache/context.c /^static void _cas_ctx_data_munlock(ctx_data_t *ctx_data)$/;" f file: +_cas_ctx_logger_dump_stack cas_cache/context.c /^static int _cas_ctx_logger_dump_stack(ocf_logger_t logger)$/;" f file: +_cas_ctx_logger_printf cas_cache/context.c /^static int _cas_ctx_logger_printf(ocf_logger_t logger, ocf_logger_lvl_t lvl,$/;" f file: +_cas_ctx_logger_printf_rl cas_cache/context.c /^static int _cas_ctx_logger_printf_rl(ocf_logger_t logger, const char *func_name)$/;" f file: +_cas_ctx_metadata_updater_init cas_cache/context.c /^static int _cas_ctx_metadata_updater_init(ocf_metadata_updater_t mu)$/;" f file: +_cas_ctx_metadata_updater_kick cas_cache/context.c /^static void _cas_ctx_metadata_updater_kick(ocf_metadata_updater_t mu)$/;" f file: +_cas_ctx_metadata_updater_stop cas_cache/context.c /^static void _cas_ctx_metadata_updater_stop(ocf_metadata_updater_t mu)$/;" f file: +_cas_ctx_queue_init cas_cache/context.c /^static int _cas_ctx_queue_init(ocf_queue_t q)$/;" f file: +_cas_ctx_queue_kick cas_cache/context.c /^static void _cas_ctx_queue_kick(ocf_queue_t q)$/;" f file: +_cas_ctx_queue_stop cas_cache/context.c /^static void _cas_ctx_queue_stop(ocf_queue_t q)$/;" f file: +_cas_ctx_read_data cas_cache/context.c /^static uint32_t _cas_ctx_read_data(void *dst, ctx_data_t *src,$/;" f file: +_cas_ctx_seek_data cas_cache/context.c /^static uint32_t _cas_ctx_seek_data(ctx_data_t *dst,$/;" f file: +_cas_ctx_write_data cas_cache/context.c /^static uint32_t _cas_ctx_write_data(ctx_data_t *dst, const void *src,$/;" f file: +_cas_ctx_zero_data cas_cache/context.c /^static uint32_t _cas_ctx_zero_data(ctx_data_t *dst, uint32_t size)$/;" f file: +_cas_detect_blk_type cas_cache/object/obj_blk_utils.c /^static inline int _cas_detect_blk_type(const char *path, uint8_t *type,$/;" f file: +_cas_free_page_rpool cas_cache/context.c /^static void _cas_free_page_rpool(void *allocator_ctx, 
void *item)$/;" f file: +_cas_io_queue_thread cas_cache/threads.c /^static int _cas_io_queue_thread(void *data)$/;" f file: +_cas_metadata_updater_thread cas_cache/threads.c /^static int _cas_metadata_updater_thread(void *data)$/;" f file: +_cas_nvme_controller_identify cas_cache/utils/utils_nvme.c /^static int _cas_nvme_controller_identify(struct file *character_device_file,$/;" f file: +_cas_nvme_format_bdev cas_cache/utils/utils_nvme.c /^static int _cas_nvme_format_bdev(struct block_device *bdev, unsigned int nsid,$/;" f file: +_cas_nvme_format_character_device cas_cache/utils/utils_nvme.c /^static int _cas_nvme_format_character_device(const char *device_path,$/;" f file: +_cas_nvme_format_controller cas_cache/utils/utils_nvme.c /^static int _cas_nvme_format_controller(struct file *character_device_file,$/;" f file: +_cas_nvme_format_namespace_by_path cas_cache/utils/utils_nvme.c /^static int _cas_nvme_format_namespace_by_path(const char *device_path,$/;" f file: +_cas_nvme_get_bdev_from_controller cas_cache/utils/utils_nvme.c /^static int _cas_nvme_get_bdev_from_controller(struct block_device **bdev,$/;" f file: +_cas_nvme_preformat_check cas_cache/utils/utils_nvme.c /^static int _cas_nvme_preformat_check(struct block_device *bdev, int force)$/;" f file: +_cas_page_clear_priv cas_cache/context.c /^static inline void _cas_page_clear_priv(struct page *page)$/;" f file: +_cas_page_get_cpu cas_cache/context.c /^static int _cas_page_get_cpu(struct page *page)$/;" f file: +_cas_page_set_cpu cas_cache/context.c /^static void _cas_page_set_cpu(struct page *page, int cpu)$/;" f file: +_cas_page_set_priv cas_cache/context.c /^static inline void _cas_page_set_priv(struct page *page)$/;" f file: +_cas_page_test_priv cas_cache/context.c /^static inline int _cas_page_test_priv(struct page *page)$/;" f file: +_cas_prop_alloc_entry_key cas_cache/utils/utils_properties.c /^static struct _cas_property *_cas_prop_alloc_entry_key(const char *key)$/;" f file: +_cas_prop_find 
cas_cache/utils/utils_properties.c /^static struct _cas_property *_cas_prop_find(const struct cas_properties *props,$/;" f file: +_cas_prop_get_size cas_cache/utils/utils_properties.c /^static uint64_t _cas_prop_get_size(struct cas_properties *props)$/;" f file: +_cas_prop_parse_int cas_cache/utils/utils_properties.c /^static int _cas_prop_parse_int(const char *buffer,$/;" f file: +_cas_prop_parse_sint cas_cache/utils/utils_properties.c /^static int _cas_prop_parse_sint(const char *buffer,$/;" f file: +_cas_prop_parse_string cas_cache/utils/utils_properties.c /^static int _cas_prop_parse_string(const char *buffer, const uint64_t size,$/;" f file: +_cas_prop_parse_uint cas_cache/utils/utils_properties.c /^static int _cas_prop_parse_uint(const char *buffer,$/;" f file: +_cas_prop_parse_version cas_cache/utils/utils_properties.c /^static int _cas_prop_parse_version(const char *buffer, uint64_t *offset,$/;" f file: +_cas_prop_serialize cas_cache/utils/utils_properties.c /^static int _cas_prop_serialize(struct _cas_property *entry, void *buffer,$/;" f file: +_cas_prop_serialize_int cas_cache/utils/utils_properties.c /^static int _cas_prop_serialize_int(char *buffer, const uint64_t size,$/;" f file: +_cas_prop_serialize_sint cas_cache/utils/utils_properties.c /^static int _cas_prop_serialize_sint(char *buffer, const uint64_t size,$/;" f file: +_cas_prop_serialize_string cas_cache/utils/utils_properties.c /^static int _cas_prop_serialize_string(char *buffer, const uint64_t size,$/;" f file: +_cas_prop_serialize_uint cas_cache/utils/utils_properties.c /^static int _cas_prop_serialize_uint(char *buffer, const uint64_t size,$/;" f file: +_cas_property cas_cache/utils/utils_properties.c /^struct _cas_property {$/;" s file: +_cas_reserve_pool_per_cpu cas_cache/utils/utils_rpool.c /^struct _cas_reserve_pool_per_cpu {$/;" s file: +_cas_rpool_pre_alloc_do cas_cache/utils/utils_rpool.c /^void _cas_rpool_pre_alloc_do(struct work_struct *ws)$/;" f +_cas_rpool_pre_alloc_info 
cas_cache/utils/utils_rpool.c /^struct _cas_rpool_pre_alloc_info {$/;" s file: +_cas_rpool_pre_alloc_schedule cas_cache/utils/utils_rpool.c /^int _cas_rpool_pre_alloc_schedule(int cpu,$/;" f +_cas_start_thread cas_cache/threads.c /^static void _cas_start_thread(struct cas_thread_info *info)$/;" f file: +_cas_stop_thread cas_cache/threads.c /^static void _cas_stop_thread(struct cas_thread_info *info)$/;" f file: +_cas_upgrade_check_ctx_state cas_cache/layer_upgrade.c /^static int _cas_upgrade_check_ctx_state(void)$/;" f file: +_cas_upgrade_clear_state cas_cache/layer_upgrade.c /^static void _cas_upgrade_clear_state(void)$/;" f file: +_cas_upgrade_core_visitor cas_cache/layer_upgrade.c /^int _cas_upgrade_core_visitor(ocf_core_t core, void *cntx)$/;" f +_cas_upgrade_destroy_props_array cas_cache/layer_upgrade.c /^static void _cas_upgrade_destroy_props_array($/;" f file: +_cas_upgrade_dump_cache_conf cas_cache/layer_upgrade.c /^static int _cas_upgrade_dump_cache_conf(ocf_cache_t device,$/;" f file: +_cas_upgrade_dump_cache_conf_cores cas_cache/layer_upgrade.c /^static int _cas_upgrade_dump_cache_conf_cores(ocf_cache_t device,$/;" f file: +_cas_upgrade_dump_cache_conf_flush cas_cache/layer_upgrade.c /^static int _cas_upgrade_dump_cache_conf_flush(ocf_cache_t cache,$/;" f file: +_cas_upgrade_dump_cache_conf_io_class cas_cache/layer_upgrade.c /^static int _cas_upgrade_dump_cache_conf_io_class(ocf_cache_t cache,$/;" f file: +_cas_upgrade_dump_cache_conf_main cas_cache/layer_upgrade.c /^static int _cas_upgrade_dump_cache_conf_main(ocf_cache_t cache,$/;" f file: +_cas_upgrade_dump_cache_conf_visitor cas_cache/layer_upgrade.c /^int _cas_upgrade_dump_cache_conf_visitor(ocf_cache_t cache, void *cntx)$/;" f +_cas_upgrade_dump_conf cas_cache/layer_upgrade.c /^static int _cas_upgrade_dump_conf(void)$/;" f file: +_cas_upgrade_dump_io_class_visit_ctx cas_cache/layer_upgrade.c /^struct _cas_upgrade_dump_io_class_visit_ctx {$/;" s file: +_cas_upgrade_dump_io_class_visitor 
cas_cache/layer_upgrade.c /^int _cas_upgrade_dump_io_class_visitor(ocf_cache_t cache,$/;" f +_cas_upgrade_init_props_array cas_cache/layer_upgrade.c /^static int _cas_upgrade_init_props_array($/;" f file: +_cas_upgrade_restore_cache cas_cache/layer_upgrade.c /^static int _cas_upgrade_restore_cache(struct cas_properties *cache_props)$/;" f file: +_cas_upgrade_restore_cache_after_error cas_cache/layer_upgrade.c /^static int _cas_upgrade_restore_cache_after_error($/;" f file: +_cas_upgrade_restore_cache_mode cas_cache/layer_upgrade.c /^static int _cas_upgrade_restore_cache_mode(struct cas_properties *cache_props)$/;" f file: +_cas_upgrade_restore_cache_mode_visitor cas_cache/layer_upgrade.c /^int _cas_upgrade_restore_cache_mode_visitor(ocf_core_t core, void *cntx)$/;" f +_cas_upgrade_restore_conf_core cas_cache/layer_upgrade.c /^static int _cas_upgrade_restore_conf_core(struct cas_properties *cache_props,$/;" f file: +_cas_upgrade_restore_conf_flush cas_cache/layer_upgrade.c /^static int _cas_upgrade_restore_conf_flush(struct cas_properties *cache_props,$/;" f file: +_cas_upgrade_restore_conf_io_class cas_cache/layer_upgrade.c /^static int _cas_upgrade_restore_conf_io_class($/;" f file: +_cas_upgrade_restore_conf_main cas_cache/layer_upgrade.c /^static int _cas_upgrade_restore_conf_main(struct cas_properties *cache_props,$/;" f file: +_cas_upgrade_restore_configuration cas_cache/layer_upgrade.c /^static int _cas_upgrade_restore_configuration($/;" f file: +_cas_upgrade_restore_noop cas_cache/layer_upgrade.c /^static int _cas_upgrade_restore_noop(struct cas_properties *cache_props)$/;" f file: +_cas_upgrade_set_pt_and_flush cas_cache/layer_upgrade.c /^static int _cas_upgrade_set_pt_and_flush(void)$/;" f file: +_cas_upgrade_set_pt_and_flush_visitor_cache cas_cache/layer_upgrade.c /^int _cas_upgrade_set_pt_and_flush_visitor_cache(ocf_cache_t cache, void *cntx)$/;" f +_cas_upgrade_set_state cas_cache/layer_upgrade.c /^static void _cas_upgrade_set_state(void)$/;" f file: 
+_cas_upgrade_stop_devices cas_cache/layer_upgrade.c /^static int _cas_upgrade_stop_devices(void)$/;" f file: +_cas_upgrade_stop_devices_visitor_exit cas_cache/layer_upgrade.c /^int _cas_upgrade_stop_devices_visitor_exit(ocf_cache_t cache, void *cntx)$/;" f +_cas_upgrade_stop_devices_visitor_wait cas_cache/layer_upgrade.c /^int _cas_upgrade_stop_devices_visitor_wait(ocf_cache_t cache, void *cntx)$/;" f +_casdsk_del_partitions cas_disk/exp_obj.c /^static int _casdsk_del_partitions(struct block_device *bd)$/;" f file: +_casdsk_disk_attrs cas_disk/disk.c /^static struct attribute *_casdsk_disk_attrs[] = {$/;" v typeref:struct:attribute file: +_casdsk_disk_claim cas_disk/disk.c /^static void _casdsk_disk_claim(struct casdsk_disk *dsk, void *private)$/;" f file: +_casdsk_disk_init_kobject cas_disk/disk.c /^static int _casdsk_disk_init_kobject(struct casdsk_disk *dsk)$/;" f file: +_casdsk_disk_mode_attr cas_disk/disk.c /^static struct casdsk_attribute _casdsk_disk_mode_attr =$/;" v typeref:struct:casdsk_attribute file: +_casdsk_disk_mode_show cas_disk/disk.c /^static ssize_t _casdsk_disk_mode_show(struct kobject *kobj, char *page)$/;" f file: +_casdsk_disk_modes cas_disk/disk.c /^static const char * const _casdsk_disk_modes[] = {$/;" v file: +_casdsk_disk_release cas_disk/disk.c /^static void _casdsk_disk_release(struct kobject *kobj)$/;" f file: +_casdsk_exp_obj_begin_rq cas_disk/exp_obj.c /^static inline unsigned int _casdsk_exp_obj_begin_rq(struct casdsk_disk *dsk)$/;" f file: +_casdsk_exp_obj_clear_dev_t cas_disk/exp_obj.c /^static void _casdsk_exp_obj_clear_dev_t(struct casdsk_disk *dsk)$/;" f file: +_casdsk_exp_obj_drain_elevator cas_disk/exp_obj.c /^static void _casdsk_exp_obj_drain_elevator(struct request_queue *q)$/;" f file: +_casdsk_exp_obj_end_rq cas_disk/exp_obj.c /^static inline void _casdsk_exp_obj_end_rq(struct casdsk_disk *dsk, unsigned int cpu)$/;" f file: +_casdsk_exp_obj_exists cas_disk/exp_obj.c /^static bool _casdsk_exp_obj_exists(const char 
*path)$/;" f file: +_casdsk_exp_obj_flush_queue cas_disk/exp_obj.c /^static void _casdsk_exp_obj_flush_queue(struct casdsk_disk *dsk)$/;" f file: +_casdsk_exp_obj_handle_bio cas_disk/exp_obj.c /^static inline void _casdsk_exp_obj_handle_bio(struct casdsk_disk *dsk,$/;" f file: +_casdsk_exp_obj_handle_bio_att cas_disk/exp_obj.c /^static inline void _casdsk_exp_obj_handle_bio_att(struct casdsk_disk *dsk,$/;" f file: +_casdsk_exp_obj_handle_bio_pt cas_disk/exp_obj.c /^static inline void _casdsk_exp_obj_handle_bio_pt(struct casdsk_disk *dsk,$/;" f file: +_casdsk_exp_obj_hide_parts cas_disk/exp_obj.c /^static int _casdsk_exp_obj_hide_parts(struct casdsk_disk *dsk)$/;" f file: +_casdsk_exp_obj_init_kobject cas_disk/exp_obj.c /^static int _casdsk_exp_obj_init_kobject(struct casdsk_disk *dsk)$/;" f file: +_casdsk_exp_obj_make_rq_fn cas_disk/exp_obj.c /^static MAKE_RQ_RET_TYPE _casdsk_exp_obj_make_rq_fn(struct request_queue *q,$/;" f file: +_casdsk_exp_obj_ops cas_disk/exp_obj.c /^static const struct block_device_operations _casdsk_exp_obj_ops = {$/;" v typeref:struct:block_device_operations file: +_casdsk_exp_obj_prep_rq_fn cas_disk/exp_obj.c /^static int _casdsk_exp_obj_prep_rq_fn(struct request_queue *q, struct request *rq)$/;" f file: +_casdsk_exp_obj_release cas_disk/exp_obj.c /^static void _casdsk_exp_obj_release(struct kobject *kobj)$/;" f file: +_casdsk_exp_obj_request_fn cas_disk/exp_obj.c /^static void _casdsk_exp_obj_request_fn(struct request_queue *q)$/;" f file: +_casdsk_exp_obj_set_dev_t cas_disk/exp_obj.c /^static int _casdsk_exp_obj_set_dev_t(struct casdsk_disk *dsk, struct gendisk *gd)$/;" f file: +_casdsk_exp_obj_wait_for_pending_rqs cas_disk/exp_obj.c /^static void _casdsk_exp_obj_wait_for_pending_rqs(struct casdsk_disk *dsk)$/;" f file: +_casdsk_flags cas_disk/exp_obj.c /^static int _casdsk_flags = GENHD_FL_EXT_DEVT;$/;" v file: +_casdsk_flags cas_disk/exp_obj.c /^static int _casdsk_flags = GENHD_FL_NO_PART_SCAN | GENHD_FL_EXT_DEVT;$/;" v file: 
+_casdsk_get_next_part_no cas_disk/exp_obj.c /^static int _casdsk_get_next_part_no(struct block_device *bd)$/;" f file: +_casdsk_module_free_config cas_disk/main.c /^static void _casdsk_module_free_config(struct casdsk_module *mod)$/;" f file: +_casdsk_module_ktype cas_disk/main.c /^static struct kobj_type _casdsk_module_ktype = {$/;" v typeref:struct:kobj_type file: +_casdsk_module_release cas_disk/main.c /^static void _casdsk_module_release(struct kobject *kobj)$/;" f file: +_casdsk_sysfs_show cas_disk/sysfs.c /^static ssize_t _casdsk_sysfs_show(struct kobject *kobj, struct attribute *attr,$/;" f file: +_casdsk_sysfs_store cas_disk/sysfs.c /^static ssize_t _casdsk_sysfs_store(struct kobject *kobj, struct attribute *attr,$/;" f file: +_control_device cas_cache/control.c /^static struct cas_ctrl_device _control_device;$/;" v typeref:struct:cas_ctrl_device file: +_ctrl_dev_fops cas_cache/control.c /^static const struct file_operations _ctrl_dev_fops = {$/;" v typeref:struct:file_operations file: +_env_allocator cas_cache/ocf_env.c /^struct _env_allocator {$/;" s file: +_env_allocator_item cas_cache/ocf_env.c /^struct _env_allocator_item {$/;" s file: +_ocf_core_visitor_ctx cas_cache/layer_upgrade.c /^struct _ocf_core_visitor_ctx {$/;" s file: +activated cas_disk/exp_obj.h /^ bool activated;$/;" m struct:casdsk_exp_obj +addr cas_cache/main.c /^ unsigned long addr;$/;" m struct:exported_symbol file: +addr cas_cache/object/obj_atomic_dev_bottom.c /^ uint64_t addr;$/;" m struct:cas_atomic_io file: +allocator_ctx cas_cache/utils/utils_rpool.c /^ void *allocator_ctx;$/;" m struct:_cas_rpool_pre_alloc_info file: +atomic_dev_deinit cas_cache/object/obj_atomic_dev_bottom.c /^void atomic_dev_deinit(void)$/;" f +atomic_dev_init cas_cache/object/obj_atomic_dev_bottom.c /^int atomic_dev_init(void)$/;" f +atomic_dev_params cas_cache/object/obj_atomic_dev_bottom.h /^struct atomic_dev_params {$/;" s +atomic_io_allocator cas_cache/object/obj_atomic_dev_bottom.c /^static struct 
ocf_mpool *atomic_io_allocator;$/;" v typeref:struct:ocf_mpool file: +atomic_metadata_mode cas_cache/object/obj_atomic_dev_bottom.h /^enum atomic_metadata_mode {$/;" g +atomic_params cas_cache/object/obj_blk.h /^ struct atomic_dev_params atomic_params;$/;" m struct:bd_volume typeref:struct:bd_volume::atomic_dev_params +attr cas_disk/sysfs.h /^ struct attribute attr;$/;" m struct:casdsk_attribute typeref:struct:casdsk_attribute::attribute +bd cas_disk/disk.h /^ struct block_device *bd;$/;" m struct:casdsk_disk typeref:struct:casdsk_disk::block_device +bd_claim_by_disk cas_disk/cas_disk_defs.h /^static inline int bd_claim_by_disk(struct block_device *bdev, void *holder,$/;" f +bd_release_from_disk cas_disk/cas_disk_defs.h /^static inline void bd_release_from_disk(struct block_device *bdev,$/;" f +bd_volume cas_cache/object/obj_blk.h /^static inline struct bd_volume *bd_volume(ocf_volume_t vol)$/;" f +bd_volume cas_cache/object/obj_blk.h /^struct bd_volume {$/;" s +bio cas_cache/object/obj_atomic_dev_bottom.c /^ struct bio *bio;$/;" m struct:cas_atomic_io typeref:struct:cas_atomic_io::bio file: +bio cas_disk/exp_obj.h /^ struct bio *bio;$/;" m struct:casdsk_exp_obj_pt_io_ctx typeref:struct:casdsk_exp_obj_pt_io_ctx::bio +bio_vec_iter cas_cache/context.h /^struct bio_vec_iter {$/;" s +blk_data cas_cache/context.h /^struct blk_data {$/;" s +blkio cas_cache/object/obj_blk_utils.h /^struct blkio {$/;" s +blobs cas_disk/cas_disk_defs.h /^ struct casdsk_props_conf *blobs;$/;" m struct:casdsk_stored_config typeref:struct:casdsk_stored_config::casdsk_props_conf +block_dev_activate_exported_object cas_cache/object/obj_block_dev_top.c /^int block_dev_activate_exported_object(ocf_core_t core)$/;" f +block_dev_close_object cas_cache/object/obj_block_dev_bottom.c /^void block_dev_close_object(ocf_volume_t vol)$/;" f +block_dev_complete_bio_discard cas_cache/object/obj_block_dev_top.c /^void block_dev_complete_bio_discard(struct ocf_io *io, int error)$/;" f 
+block_dev_complete_bio_fast cas_cache/object/obj_block_dev_top.c /^void block_dev_complete_bio_fast(struct ocf_io *io, int error)$/;" f +block_dev_complete_flush cas_cache/object/obj_block_dev_top.c /^void block_dev_complete_flush(struct ocf_io *io, int error)$/;" f +block_dev_complete_rq cas_cache/object/obj_block_dev_top.c /^void block_dev_complete_rq(struct ocf_io *io, int error)$/;" f +block_dev_complete_sub_rq cas_cache/object/obj_block_dev_top.c /^void block_dev_complete_sub_rq(struct ocf_io *io, int error)$/;" f +block_dev_create_exported_object cas_cache/object/obj_block_dev_top.c /^int block_dev_create_exported_object(ocf_core_t core)$/;" f +block_dev_deinit cas_cache/object/obj_block_dev_bottom.c /^void block_dev_deinit(void)$/;" f +block_dev_destroy_all_exported_objects cas_cache/object/obj_block_dev_top.c /^int block_dev_destroy_all_exported_objects(ocf_cache_t cache)$/;" f +block_dev_destroy_exported_object cas_cache/object/obj_block_dev_top.c /^int block_dev_destroy_exported_object(ocf_core_t core)$/;" f +block_dev_get_byte_length cas_cache/object/obj_block_dev_bottom.c /^uint64_t block_dev_get_byte_length(ocf_volume_t vol)$/;" f +block_dev_get_elevator_name cas_cache/object/obj_block_dev_bottom.c /^const char *block_dev_get_elevator_name(struct request_queue *q)$/;" f +block_dev_get_max_io_size cas_cache/object/obj_block_dev_bottom.c /^unsigned int block_dev_get_max_io_size(ocf_volume_t vol)$/;" f +block_dev_init cas_cache/object/obj_block_dev_bottom.c /^int block_dev_init(void)$/;" f +block_dev_is_metadata_mode_optimal cas_cache/object/obj_block_dev_bottom.c /^int block_dev_is_metadata_mode_optimal(struct atomic_dev_params *atomic_params,$/;" f +block_dev_open_object cas_cache/object/obj_block_dev_bottom.c /^int block_dev_open_object(ocf_volume_t vol)$/;" f +block_dev_start_bio_fast cas_cache/object/obj_block_dev_top.c /^void block_dev_start_bio_fast(struct ocf_io *io)$/;" f +block_dev_submit_discard cas_cache/object/obj_block_dev_bottom.c /^void 
block_dev_submit_discard(struct ocf_io *io)$/;" f +block_dev_submit_flush cas_cache/object/obj_block_dev_bottom.c /^static void block_dev_submit_flush(struct ocf_io *io)$/;" f file: +block_dev_submit_io cas_cache/object/obj_block_dev_bottom.c /^static void block_dev_submit_io(struct ocf_io *io)$/;" f file: +block_dev_try_get_io_class cas_cache/object/obj_block_dev_bottom.c /^int block_dev_try_get_io_class(struct bio *bio, int *io_class)$/;" f +btm_bd cas_cache/object/obj_blk.h /^ struct block_device *btm_bd;$/;" m struct:bd_volume typeref:struct:bd_volume::block_device +buffer cas_disk/cas_disk.h /^ void *buffer;$/;" m struct:casdsk_props_conf +bvec_size cas_cache/object/obj_atomic_dev_bottom.c /^ uint32_t bvec_size;$/;" m struct:cas_atomic_io file: +bytes cas_cache/object/obj_atomic_dev_bottom.c /^ uint32_t bytes;$/;" m struct:cas_atomic_io file: +cache_count include/cas_ioctl_codes.h /^ int cache_count;$/;" m struct:kcas_cache_count +cache_dirty include/cas_ioctl_codes.h /^ bool cache_dirty;$/;" m struct:kcas_cache_check_device +cache_elevator include/cas_ioctl_codes.h /^ char cache_elevator[MAX_ELEVATOR_NAME];$/;" m struct:kcas_start_cache +cache_id include/cas_ioctl_codes.h /^ ocf_cache_id_t cache_id; \/**< id of an running cache *\/$/;" m struct:kcas_flush_cache +cache_id include/cas_ioctl_codes.h /^ ocf_cache_id_t cache_id; \/**< id of an running cache *\/$/;" m struct:kcas_flush_core +cache_id include/cas_ioctl_codes.h /^ ocf_cache_id_t cache_id; \/**< id of an running cache *\/$/;" m struct:kcas_insert_core +cache_id include/cas_ioctl_codes.h /^ ocf_cache_id_t cache_id; \/**< id of an running cache *\/$/;" m struct:kcas_interrupt_flushing +cache_id include/cas_ioctl_codes.h /^ ocf_cache_id_t cache_id; \/**< id of an running cache *\/$/;" m struct:kcas_remove_core +cache_id include/cas_ioctl_codes.h /^ ocf_cache_id_t cache_id; \/**< id of an running cache *\/$/;" m struct:kcas_reset_stats +cache_id include/cas_ioctl_codes.h /^ ocf_cache_id_t cache_id; \/**< 
id of cache for which state should be set *\/$/;" m struct:kcas_set_cache_state +cache_id include/cas_ioctl_codes.h /^ ocf_cache_id_t cache_id; \/**< id of cache to be stopped *\/$/;" m struct:kcas_stop_cache +cache_id include/cas_ioctl_codes.h /^ ocf_cache_id_t cache_id;$/;" m struct:kcas_cache_info +cache_id include/cas_ioctl_codes.h /^ ocf_cache_id_t cache_id;$/;" m struct:kcas_core_info +cache_id include/cas_ioctl_codes.h /^ ocf_cache_id_t cache_id;$/;" m struct:kcas_get_cache_param +cache_id include/cas_ioctl_codes.h /^ ocf_cache_id_t cache_id;$/;" m struct:kcas_get_core_param +cache_id include/cas_ioctl_codes.h /^ ocf_cache_id_t cache_id;$/;" m struct:kcas_io_class +cache_id include/cas_ioctl_codes.h /^ ocf_cache_id_t cache_id;$/;" m struct:kcas_io_classes +cache_id include/cas_ioctl_codes.h /^ ocf_cache_id_t cache_id;$/;" m struct:kcas_set_cache_param +cache_id include/cas_ioctl_codes.h /^ ocf_cache_id_t cache_id;$/;" m struct:kcas_set_core_param +cache_id include/cas_ioctl_codes.h /^ ocf_cache_id_t cache_id;$/;" m struct:kcas_start_cache +cache_id_tab include/cas_ioctl_codes.h /^ ocf_cache_id_t cache_id_tab[CACHE_LIST_ID_LIMIT];$/;" m struct:kcas_cache_list +cache_mng_add_core_to_cache cas_cache/layer_cache_management.c /^int cache_mng_add_core_to_cache(struct ocf_mngt_core_config *cfg,$/;" f +cache_mng_cache_check_device cas_cache/layer_cache_management.c /^int cache_mng_cache_check_device(struct kcas_cache_check_device *cmd_info)$/;" f +cache_mng_core_pool_get_paths cas_cache/layer_cache_management.c /^int cache_mng_core_pool_get_paths(struct kcas_core_pool_path *cmd_info)$/;" f +cache_mng_core_pool_remove cas_cache/layer_cache_management.c /^int cache_mng_core_pool_remove(struct kcas_core_pool_remove *cmd_info)$/;" f +cache_mng_exit_instance cas_cache/layer_cache_management.c /^int cache_mng_exit_instance(ocf_cache_id_t id, int flush)$/;" f +cache_mng_flush_device cas_cache/layer_cache_management.c /^int cache_mng_flush_device(ocf_cache_id_t id)$/;" f 
+cache_mng_flush_object cas_cache/layer_cache_management.c /^int cache_mng_flush_object(ocf_cache_id_t cache_id, ocf_core_id_t core_id,$/;" f +cache_mng_get_cache_params cas_cache/layer_cache_management.c /^int cache_mng_get_cache_params(struct kcas_get_cache_param *info)$/;" f +cache_mng_get_cleaning_param cas_cache/layer_cache_management.c /^int cache_mng_get_cleaning_param(ocf_cache_id_t cache_id, ocf_cleaning_t type,$/;" f +cache_mng_get_cleaning_policy cas_cache/layer_cache_management.c /^int cache_mng_get_cleaning_policy(ocf_cache_id_t cache_id, uint32_t *type)$/;" f +cache_mng_get_core_info cas_cache/layer_cache_management.c /^int cache_mng_get_core_info(struct kcas_core_info *info)$/;" f +cache_mng_get_core_params cas_cache/layer_cache_management.c /^int cache_mng_get_core_params(struct kcas_get_core_param *info)$/;" f +cache_mng_get_info cas_cache/layer_cache_management.c /^int cache_mng_get_info(struct kcas_cache_info *info)$/;" f +cache_mng_get_io_class_info cas_cache/layer_cache_management.c /^int cache_mng_get_io_class_info(struct kcas_io_class *part)$/;" f +cache_mng_get_seq_cutoff_policy cas_cache/layer_cache_management.c /^int cache_mng_get_seq_cutoff_policy(ocf_cache_id_t id, ocf_core_id_t core_id,$/;" f +cache_mng_get_seq_cutoff_threshold cas_cache/layer_cache_management.c /^int cache_mng_get_seq_cutoff_threshold(ocf_cache_id_t cache_id,$/;" f +cache_mng_init_instance cas_cache/layer_cache_management.c /^int cache_mng_init_instance(struct ocf_mngt_cache_config *cfg,$/;" f +cache_mng_initialize_core_objects cas_cache/layer_cache_management.c /^static int cache_mng_initialize_core_objects(ocf_cache_t cache)$/;" f file: +cache_mng_interrupt_flushing cas_cache/layer_cache_management.c /^int cache_mng_interrupt_flushing(ocf_cache_id_t id)$/;" f +cache_mng_list_caches cas_cache/layer_cache_management.c /^int cache_mng_list_caches(struct kcas_cache_list *list)$/;" f +cache_mng_list_caches_visitor cas_cache/layer_cache_management.c /^static int 
cache_mng_list_caches_visitor(ocf_cache_t cache, void *cntx)$/;" f file: +cache_mng_prepare_cache_cfg cas_cache/layer_cache_management.c /^int cache_mng_prepare_cache_cfg(struct ocf_mngt_cache_config *cfg,$/;" f +cache_mng_prepare_core_cfg cas_cache/layer_cache_management.c /^int cache_mng_prepare_core_cfg(struct ocf_mngt_core_config *cfg,$/;" f +cache_mng_remove_core_from_cache cas_cache/layer_cache_management.c /^int cache_mng_remove_core_from_cache(struct kcas_remove_core *cmd)$/;" f +cache_mng_reset_core_stats cas_cache/layer_cache_management.c /^int cache_mng_reset_core_stats(ocf_cache_id_t cache_id,$/;" f +cache_mng_set_cache_mode cas_cache/layer_cache_management.c /^int cache_mng_set_cache_mode(ocf_cache_id_t id, ocf_cache_mode_t mode,$/;" f +cache_mng_set_cache_params cas_cache/layer_cache_management.c /^int cache_mng_set_cache_params(struct kcas_set_cache_param *info)$/;" f +cache_mng_set_cleaning_param cas_cache/layer_cache_management.c /^int cache_mng_set_cleaning_param(ocf_cache_id_t cache_id, ocf_cleaning_t type,$/;" f +cache_mng_set_cleaning_policy cas_cache/layer_cache_management.c /^int cache_mng_set_cleaning_policy(ocf_cache_id_t cache_id, uint32_t type)$/;" f +cache_mng_set_core_params cas_cache/layer_cache_management.c /^int cache_mng_set_core_params(struct kcas_set_core_param *info)$/;" f +cache_mng_set_partitions cas_cache/layer_cache_management.c /^int cache_mng_set_partitions(struct kcas_io_classes *cfg)$/;" f +cache_mng_set_seq_cutoff_policy cas_cache/layer_cache_management.c /^int cache_mng_set_seq_cutoff_policy(ocf_cache_id_t id, ocf_core_id_t core_id,$/;" f +cache_mng_set_seq_cutoff_threshold cas_cache/layer_cache_management.c /^int cache_mng_set_seq_cutoff_threshold(ocf_cache_id_t cache_id, ocf_core_id_t core_id,$/;" f +cache_mng_update_core_uuid cas_cache/layer_cache_management.c /^int cache_mng_update_core_uuid(ocf_cache_t cache, ocf_core_id_t id, ocf_uuid_t uuid)$/;" f +cache_mng_wait_for_rq_finish cas_cache/layer_cache_management.c 
/^void cache_mng_wait_for_rq_finish(ocf_cache_t cache)$/;" f +cache_mng_wait_for_rq_finish_visitor cas_cache/layer_cache_management.c /^static int cache_mng_wait_for_rq_finish_visitor(ocf_core_t core, void *cntx)$/;" f file: +cache_param_cleaning_acp_flush_max_buffers include/cas_ioctl_codes.h /^ cache_param_cleaning_acp_flush_max_buffers,$/;" e enum:kcas_cache_param_id +cache_param_cleaning_acp_wake_up_time include/cas_ioctl_codes.h /^ cache_param_cleaning_acp_wake_up_time,$/;" e enum:kcas_cache_param_id +cache_param_cleaning_alru_activity_threshold include/cas_ioctl_codes.h /^ cache_param_cleaning_alru_activity_threshold,$/;" e enum:kcas_cache_param_id +cache_param_cleaning_alru_flush_max_buffers include/cas_ioctl_codes.h /^ cache_param_cleaning_alru_flush_max_buffers,$/;" e enum:kcas_cache_param_id +cache_param_cleaning_alru_stale_buffer_time include/cas_ioctl_codes.h /^ cache_param_cleaning_alru_stale_buffer_time,$/;" e enum:kcas_cache_param_id +cache_param_cleaning_alru_wake_up_time include/cas_ioctl_codes.h /^ cache_param_cleaning_alru_wake_up_time,$/;" e enum:kcas_cache_param_id +cache_param_cleaning_policy_type include/cas_ioctl_codes.h /^ cache_param_cleaning_policy_type,$/;" e enum:kcas_cache_param_id +cache_param_id_max include/cas_ioctl_codes.h /^ cache_param_id_max,$/;" e enum:kcas_cache_param_id +cache_path_name include/cas_ioctl_codes.h /^ char cache_path_name[MAX_STR_LEN]; \/**< path to an ssd*\/$/;" m struct:kcas_start_cache +cache_path_name include/cas_ioctl_codes.h /^ char cache_path_name[MAX_STR_LEN];$/;" m struct:kcas_cache_info +cache_props cas_cache/layer_upgrade.c /^ struct cas_properties *cache_props;$/;" m struct:_cas_upgrade_dump_io_class_visit_ctx typeref:struct:_cas_upgrade_dump_io_class_visit_ctx::cas_properties file: +cache_props cas_cache/layer_upgrade.c /^ struct cas_properties *cache_props;$/;" m struct:_ocf_core_visitor_ctx typeref:struct:_ocf_core_visitor_ctx::cas_properties file: +caches_no_init cas_cache/layer_upgrade.c 
/^size_t caches_no_init;$/;" v +caches_props_array cas_cache/layer_upgrade.c /^ struct cas_properties **caches_props_array;$/;" m struct:_cas_cache_dump_conf_visitor_ctx typeref:struct:_cas_cache_dump_conf_visitor_ctx::cas_properties file: +caches_serialized_conf cas_cache/layer_upgrade.c /^ struct casdsk_props_conf *caches_serialized_conf;$/;" m struct:_cas_cache_dump_conf_visitor_ctx typeref:struct:_cas_cache_dump_conf_visitor_ctx::casdsk_props_conf file: +caches_serialized_conf_init cas_cache/layer_upgrade.c /^struct casdsk_props_conf *caches_serialized_conf_init;$/;" v typeref:struct:casdsk_props_conf +caching_mode include/cas_ioctl_codes.h /^ ocf_cache_mode_t caching_mode;$/;" m struct:kcas_set_cache_state +caching_mode include/cas_ioctl_codes.h /^ ocf_cache_mode_t caching_mode;$/;" m struct:kcas_start_cache +cas_alloc_blk_data cas_cache/context.c /^struct blk_data *cas_alloc_blk_data(uint32_t size, gfp_t flags)$/;" f +cas_atomic_alloc cas_cache/object/obj_atomic_dev_bottom.c /^static struct cas_atomic_io *cas_atomic_alloc(int dir, struct ocf_io *io, bool write_zero)$/;" f file: +cas_atomic_close_object cas_cache/object/obj_atomic_dev_bottom.c /^void cas_atomic_close_object(ocf_data_obj_t obj)$/;" f +cas_atomic_dealloc cas_cache/object/obj_atomic_dev_bottom.c /^static void cas_atomic_dealloc(struct cas_atomic_io *atomics)$/;" f file: +cas_atomic_end_atom cas_cache/object/obj_atomic_dev_bottom.c /^static void cas_atomic_end_atom(struct cas_atomic_io *atom, int error)$/;" f file: +cas_atomic_end_request cas_cache/object/obj_atomic_dev_bottom.c /^static void cas_atomic_end_request(struct request *request, int error)$/;" f file: +cas_atomic_fire_atom cas_cache/object/obj_atomic_dev_bottom.c /^static void cas_atomic_fire_atom(int dir, struct ocf_io *io,$/;" f file: +cas_atomic_fire_atoms cas_cache/object/obj_atomic_dev_bottom.c /^static void cas_atomic_fire_atoms(int dir, struct ocf_io *io,$/;" f file: +cas_atomic_fire_io cas_cache/object/obj_atomic_dev_bottom.c 
/^static void cas_atomic_fire_io(struct ocf_io *io,$/;" f file: +cas_atomic_get_length cas_cache/object/obj_atomic_dev_bottom.c /^uint64_t cas_atomic_get_length(ocf_data_obj_t obj)$/;" f +cas_atomic_get_max_io_size cas_cache/object/obj_atomic_dev_bottom.c /^unsigned int cas_atomic_get_max_io_size(ocf_data_obj_t obj)$/;" f +cas_atomic_io cas_cache/object/obj_atomic_dev_bottom.c /^struct cas_atomic_io {$/;" s file: +cas_atomic_max_io_sectors cas_cache/object/obj_atomic_dev_bottom.c /^static inline uint32_t cas_atomic_max_io_sectors(void)$/;" f file: +cas_atomic_open_object cas_cache/object/obj_atomic_dev_bottom.c /^int cas_atomic_open_object(ocf_data_obj_t obj)$/;" f +cas_atomic_prepare_atoms cas_cache/object/obj_atomic_dev_bottom.c /^static int cas_atomic_prepare_atoms(struct ocf_io *io,$/;" f file: +cas_atomic_rd_complete cas_cache/object/obj_atomic_dev_bottom.c /^static int cas_atomic_rd_complete(struct cas_atomic_io *atom)$/;" f file: +cas_atomic_rd_metadata_complete cas_cache/object/obj_atomic_dev_bottom.c /^static int cas_atomic_rd_metadata_complete(struct cas_atomic_io *atom)$/;" f file: +cas_atomic_rd_metadata_prepare cas_cache/object/obj_atomic_dev_bottom.c /^static int cas_atomic_rd_metadata_prepare(struct ocf_io *io,$/;" f file: +cas_atomic_rd_prepare cas_cache/object/obj_atomic_dev_bottom.c /^static int cas_atomic_rd_prepare(struct ocf_io *io,$/;" f file: +cas_atomic_setup_cmd cas_cache/object/obj_atomic_dev_bottom.c /^static void cas_atomic_setup_cmd(int dir, struct cas_atomic_io *atom)$/;" f file: +cas_atomic_size_of cas_cache/object/obj_atomic_dev_bottom.c /^static inline uint32_t cas_atomic_size_of(uint32_t size)$/;" f file: +cas_atomic_special_req_prepare cas_cache/object/obj_atomic_dev_bottom.c /^static int cas_atomic_special_req_prepare(struct cas_atomic_io *atom,$/;" f file: +cas_atomic_submit_discard cas_cache/object/obj_atomic_dev_bottom.c /^void cas_atomic_submit_discard(struct ocf_io *io)$/;" f +cas_atomic_submit_discard_bio 
cas_cache/object/obj_atomic_dev_bottom.c /^static int cas_atomic_submit_discard_bio(struct cas_atomic_io *atom)$/;" f file: +cas_atomic_submit_flush cas_cache/object/obj_atomic_dev_bottom.c /^void cas_atomic_submit_flush(struct ocf_io *io)$/;" f +cas_atomic_submit_flush_bio cas_cache/object/obj_atomic_dev_bottom.c /^static void cas_atomic_submit_flush_bio(struct cas_atomic_io *atom)$/;" f file: +cas_atomic_submit_io cas_cache/object/obj_atomic_dev_bottom.c /^void cas_atomic_submit_io(struct ocf_io *io)$/;" f +cas_atomic_submit_metadata cas_cache/object/obj_atomic_dev_bottom.c /^void cas_atomic_submit_metadata(struct ocf_io *io)$/;" f +cas_atomic_submit_write_zeroes cas_cache/object/obj_atomic_dev_bottom.c /^void cas_atomic_submit_write_zeroes(struct ocf_io *io)$/;" f +cas_atomic_wr_prepare cas_cache/object/obj_atomic_dev_bottom.c /^static int cas_atomic_wr_prepare(struct ocf_io *io,$/;" f file: +cas_atomic_write_zero_ctx cas_cache/object/obj_atomic_dev_bottom.c /^struct cas_atomic_write_zero_ctx$/;" s file: +cas_bd_io_alloc_bio cas_cache/object/obj_block_dev_bottom.c /^static inline struct bio *cas_bd_io_alloc_bio(struct blkio *bdio)$/;" f file: +cas_bd_io_end cas_cache/object/obj_block_dev_bottom.c /^static void cas_bd_io_end(struct ocf_io *io, int error)$/;" f file: +cas_bd_io_prepare cas_cache/object/obj_block_dev_bottom.c /^static inline bool cas_bd_io_prepare(int *dir, struct ocf_io *io)$/;" f file: +cas_bio_clone cas_cache/linux_kernel_version.h /^static inline struct bio *cas_bio_clone(struct bio *bio, gfp_t gfp_mask)$/;" f +cas_blk_close_volume cas_cache/object/obj_blk_utils.c /^void cas_blk_close_volume(ocf_volume_t vol)$/;" f +cas_blk_get_part_count cas_cache/utils/utils_blk.c /^int cas_blk_get_part_count(struct block_device *bdev)$/;" f +cas_blk_identify_type cas_cache/object/obj_blk_utils.c /^int cas_blk_identify_type(const char *path, uint8_t *type)$/;" f +cas_blk_identify_type_atomic cas_cache/object/obj_blk_utils.c /^int 
cas_blk_identify_type_atomic(const char *path, uint8_t *type,$/;" f +cas_blk_identify_type_by_bdev cas_cache/object/obj_blk_utils.c /^int cas_blk_identify_type_by_bdev(struct block_device *bdev,$/;" f +cas_blk_io_get_data cas_cache/object/obj_blk_utils.c /^ctx_data_t *cas_blk_io_get_data(struct ocf_io *io)$/;" f +cas_blk_io_set_data cas_cache/object/obj_blk_utils.c /^int cas_blk_io_set_data(struct ocf_io *io,$/;" f +cas_blk_is_flush_io cas_cache/object/obj_blk_utils.h /^static inline bool cas_blk_is_flush_io(unsigned long flags)$/;" f +cas_blk_make_request cas_cache/linux_kernel_version.h /^static inline struct request *cas_blk_make_request(struct request_queue *q,$/;" f +cas_blk_open_volume_by_bdev cas_cache/object/obj_blk_utils.c /^int cas_blk_open_volume_by_bdev(ocf_volume_t *vol,$/;" f +cas_blk_queue_bounce cas_cache/linux_kernel_version.h 375;" d +cas_blk_queue_bounce cas_cache/linux_kernel_version.h 377;" d +cas_blk_rq_append_bio cas_cache/linux_kernel_version.h 381;" d +cas_blk_rq_append_bio cas_cache/linux_kernel_version.h 383;" d +cas_blk_rq_set_block_pc cas_cache/linux_kernel_version.h 369;" d +cas_blk_rq_set_block_pc cas_cache/linux_kernel_version.h 371;" d +cas_bvec_pages_rpool cas_cache/context.c /^struct cas_reserve_pool *cas_bvec_pages_rpool;$/;" v typeref:struct:cas_reserve_pool +cas_bvec_pool cas_cache/context.c /^struct ocf_mpool *cas_bvec_pool;$/;" v typeref:struct:ocf_mpool +cas_cache-c cas_cache/Makefile /^cas_cache-c = $(shell find $(M)\/cas_cache -name \\*.c)$/;" m +cas_cache-objs cas_cache/Makefile /^cas_cache-objs = $(patsubst $(M)\/cas_cache\/%.c,%.o,$(cas_cache-c))$/;" m +cas_casdisk_lookup_funtions cas_cache/main.c /^int static cas_casdisk_lookup_funtions(void)$/;" f file: +cas_cleanup_context cas_cache/context.c /^int cas_cleanup_context(void)$/;" f +cas_copy_queue_limits cas_cache/linux_kernel_version.h /^static inline void cas_copy_queue_limits(struct request_queue *exp_q,$/;" f +cas_create_cleaner_thread cas_cache/threads.c /^int 
cas_create_cleaner_thread(ocf_cleaner_t c)$/;" f +cas_create_metadata_updater_thread cas_cache/threads.c /^int cas_create_metadata_updater_thread(ocf_metadata_updater_t mu)$/;" f +cas_create_queue_thread cas_cache/threads.c /^int cas_create_queue_thread(ocf_queue_t q)$/;" f +cas_ctrl_device cas_cache/control.c /^struct cas_ctrl_device {$/;" s file: +cas_ctrl_device_deinit cas_cache/control.c /^void __exit cas_ctrl_device_deinit(void)$/;" f +cas_ctrl_device_init cas_cache/control.c /^int __init cas_ctrl_device_init(void)$/;" f +cas_ctx cas_cache/main.c /^ocf_ctx_t cas_ctx;$/;" v +cas_ctx_data_alloc cas_cache/context.c /^ctx_data_t *cas_ctx_data_alloc(uint32_t pages)$/;" f +cas_ctx_data_free cas_cache/context.c /^void cas_ctx_data_free(ctx_data_t *ctx_data)$/;" f +cas_ctx_data_secure_erase cas_cache/context.c /^void cas_ctx_data_secure_erase(ctx_data_t *ctx_data)$/;" f +cas_ctx_data_zalloc cas_cache/context.c /^ctx_data_t *cas_ctx_data_zalloc(uint32_t pages)$/;" f +cas_data_cpy cas_cache/utils/utils_data.c /^uint64_t cas_data_cpy(struct bio_vec *dst, uint64_t dst_num,$/;" f +cas_disk-objs cas_disk/Makefile /^cas_disk-objs = main.o$/;" m +cas_dss_bio_io_class cas_cache/cas_cache_dss.c /^int cas_dss_bio_io_class(ocf_cache_t cache, struct bio *bio)$/;" f +cas_dss_file_size_tag cas_cache/cas_cache_dss.c /^static inline int cas_dss_file_size_tag(struct inode *inode)$/;" f file: +cas_error cas_cache/service_ui_ioctl.c /^ int cas_error;$/;" m struct:__anon4 file: +cas_error_code_map cas_cache/service_ui_ioctl.c /^} static cas_error_code_map[] = {$/;" v file: +cas_exit_module cas_cache/main.c /^module_exit(cas_exit_module);$/;" v +cas_exit_module cas_cache/main.c /^static void __exit cas_exit_module(void)$/;" f file: +cas_find_symbol cas_cache/main.c /^int static cas_find_symbol(void *data, const char *namebuf,$/;" f file: +cas_free_blk_data cas_cache/context.c /^void cas_free_blk_data(struct blk_data *data)$/;" f +cas_garbage_collector cas_cache/utils/utils_gc.c /^static 
void cas_garbage_collector(struct work_struct *w)$/;" f file: +cas_garbage_collector_deinit cas_cache/utils/utils_gc.c /^void cas_garbage_collector_deinit(void) {};$/;" f +cas_garbage_collector_deinit cas_cache/utils/utils_gc.c /^void cas_garbage_collector_deinit(void)$/;" f +cas_garbage_collector_init cas_cache/utils/utils_gc.c /^void cas_garbage_collector_init(void) {};$/;" f +cas_garbage_collector_init cas_cache/utils/utils_gc.c /^void cas_garbage_collector_init(void)$/;" f +cas_generic_end_io_acct cas_cache/linux_kernel_version.h /^static inline void cas_generic_end_io_acct(struct request_queue *q,$/;" f +cas_generic_start_io_acct cas_cache/linux_kernel_version.h /^static inline void cas_generic_start_io_acct(struct request_queue *q,$/;" f +cas_getustimeofday cas_cache/cas_cache.h /^static inline unsigned long long cas_getustimeofday(void)$/;" f +cas_global_zone_page_state cas_cache/linux_kernel_version.h /^static inline unsigned long cas_global_zone_page_state(enum zone_stat_item item)$/;" f +cas_init_module cas_cache/main.c /^module_init(cas_init_module);$/;" v +cas_init_module cas_cache/main.c /^static int __init cas_init_module(void)$/;" f file: +cas_initialize_context cas_cache/context.c /^int cas_initialize_context(void)$/;" f +cas_io_iter_advanced cas_cache/object/obj_blk_utils.c /^static void cas_io_iter_advanced(struct bio_vec_iter *iter, uint32_t bytes)$/;" f file: +cas_io_iter_copy_set cas_cache/object/obj_blk_utils.h /^static inline void cas_io_iter_copy_set(struct bio_vec_iter *dst,$/;" f +cas_io_iter_cpy cas_cache/object/obj_blk_utils.c /^uint32_t cas_io_iter_cpy(struct bio_vec_iter *dst, struct bio_vec_iter *src,$/;" f +cas_io_iter_cpy_from_data cas_cache/object/obj_blk_utils.c /^uint32_t cas_io_iter_cpy_from_data(struct bio_vec_iter *dst,$/;" f +cas_io_iter_cpy_to_data cas_cache/object/obj_blk_utils.c /^uint32_t cas_io_iter_cpy_to_data(void *dst, struct bio_vec_iter *src,$/;" f +cas_io_iter_current_length cas_cache/object/obj_blk_utils.h 
/^static inline uint32_t cas_io_iter_current_length(struct bio_vec_iter *iter)$/;" f +cas_io_iter_current_offset cas_cache/object/obj_blk_utils.h /^static inline uint32_t cas_io_iter_current_offset(struct bio_vec_iter *iter)$/;" f +cas_io_iter_current_page cas_cache/object/obj_blk_utils.h /^static inline struct page *cas_io_iter_current_page(struct bio_vec_iter *iter)$/;" f +cas_io_iter_init cas_cache/object/obj_blk_utils.h /^static inline void cas_io_iter_init(struct bio_vec_iter *iter,$/;" f +cas_io_iter_is_next cas_cache/object/obj_blk_utils.h /^static inline bool cas_io_iter_is_next(struct bio_vec_iter *iter)$/;" f +cas_io_iter_move cas_cache/object/obj_blk_utils.c /^uint32_t cas_io_iter_move(struct bio_vec_iter *iter, uint32_t bytes)$/;" f +cas_io_iter_set cas_cache/object/obj_blk_utils.h /^static inline void cas_io_iter_set(struct bio_vec_iter *iter,$/;" f +cas_io_iter_size_done cas_cache/object/obj_blk_utils.h /^static inline uint32_t cas_io_iter_size_done(struct bio_vec_iter *iter)$/;" f +cas_io_iter_size_left cas_cache/object/obj_blk_utils.h /^static inline uint32_t cas_io_iter_size_left(struct bio_vec_iter *iter)$/;" f +cas_io_iter_zero cas_cache/object/obj_blk_utils.c /^uint32_t cas_io_iter_zero(struct bio_vec_iter *dst, uint32_t bytes)$/;" f +cas_io_to_blkio cas_cache/object/obj_blk_utils.h /^static inline struct blkio *cas_io_to_blkio(struct ocf_io *io)$/;" f +cas_kick_metadata_updater_thread cas_cache/threads.c /^void cas_kick_metadata_updater_thread(ocf_metadata_updater_t mu)$/;" f +cas_kick_queue_thread cas_cache/threads.c /^void cas_kick_queue_thread(ocf_queue_t q)$/;" f +cas_lookup_symbol cas_cache/main.c 69;" d file: +cas_nvme_format_optimal cas_cache/utils/utils_nvme.c /^int cas_nvme_format_optimal(const char *device_path, int metadata_mode,$/;" f +cas_nvme_get_nsid cas_cache/utils/utils_nvme.c /^int cas_nvme_get_nsid(struct block_device *bdev, unsigned int *nsid)$/;" f +cas_nvme_identify_ctrl cas_cache/utils/utils_nvme.c /^int 
cas_nvme_identify_ctrl(struct block_device *bdev,$/;" f +cas_nvme_identify_ns cas_cache/utils/utils_nvme.c /^int cas_nvme_identify_ns(struct block_device *bdev, unsigned int nsid,$/;" f +cas_nvme_identify_ns_contorller cas_cache/utils/utils_nvme.c /^int cas_nvme_identify_ns_contorller(struct file *file, struct nvme_id_ns *ns)$/;" f +cas_object_atomic_properties cas_cache/object/obj_atomic_dev_bottom.c /^const struct ocf_data_obj_properties cas_object_atomic_properties = {$/;" v typeref:struct:ocf_data_obj_properties +cas_object_blk_properties cas_cache/object/obj_block_dev_bottom.c /^const struct ocf_volume_properties cas_object_blk_properties = {$/;" v typeref:struct:ocf_volume_properties +cas_prepare_atom_pfn_t cas_cache/object/obj_atomic_dev_bottom.c /^typedef int (*cas_prepare_atom_pfn_t)(struct ocf_io *io,$/;" t file: +cas_prop_strncpy cas_cache/utils/utils_properties.h 10;" d +cas_prop_strncpy cas_cache/utils/utils_properties.h 14;" d +cas_prop_strnlen cas_cache/utils/utils_properties.h 12;" d +cas_prop_strnlen cas_cache/utils/utils_properties.h 16;" d +cas_properites_parse cas_cache/utils/utils_properties.c /^cas_properites_parse(struct casdsk_props_conf *caches_serialized_conf)$/;" f +cas_properites_parse_version cas_cache/utils/utils_properties.c /^int cas_properites_parse_version(struct casdsk_props_conf *caches_serialized_conf,$/;" f +cas_properties cas_cache/utils/utils_properties.c /^struct cas_properties {$/;" s file: +cas_properties_add_sint cas_cache/utils/utils_properties.c /^int cas_properties_add_sint(struct cas_properties *props, const char *key,$/;" f +cas_properties_add_string cas_cache/utils/utils_properties.c /^int cas_properties_add_string(struct cas_properties *props, const char *key,$/;" f +cas_properties_add_uint cas_cache/utils/utils_properties.c /^int cas_properties_add_uint(struct cas_properties *props, const char *key,$/;" f +cas_properties_create cas_cache/utils/utils_properties.c /^struct cas_properties 
*cas_properties_create(void)$/;" f +cas_properties_destroy cas_cache/utils/utils_properties.c /^void cas_properties_destroy(struct cas_properties *props)$/;" f +cas_properties_get_sint cas_cache/utils/utils_properties.c /^int cas_properties_get_sint(struct cas_properties *props, const char *key,$/;" f +cas_properties_get_string cas_cache/utils/utils_properties.c /^int cas_properties_get_string(struct cas_properties *props, const char *key,$/;" f +cas_properties_get_uint cas_cache/utils/utils_properties.c /^int cas_properties_get_uint(struct cas_properties *props, const char *key,$/;" f +cas_properties_print cas_cache/utils/utils_properties.c /^void cas_properties_print(struct cas_properties *props)$/;" f +cas_properties_serialize cas_cache/utils/utils_properties.c /^int cas_properties_serialize(struct cas_properties *props,$/;" f +cas_property_sint cas_cache/utils/utils_properties.c /^ cas_property_sint = 16,$/;" e enum:cas_property_type file: +cas_property_sint_const cas_cache/utils/utils_properties.c /^ cas_property_sint_const = (cas_property_sint | CAS_PROPERTIES_CONST),$/;" e enum:cas_property_type file: +cas_property_string cas_cache/utils/utils_properties.c /^ cas_property_string = 10,$/;" e enum:cas_property_type file: +cas_property_string_const cas_cache/utils/utils_properties.c /^ cas_property_string_const =$/;" e enum:cas_property_type file: +cas_property_type cas_cache/utils/utils_properties.c /^enum cas_property_type {$/;" g file: +cas_property_uint cas_cache/utils/utils_properties.c /^ cas_property_uint = 74,$/;" e enum:cas_property_type file: +cas_property_uint_const cas_cache/utils/utils_properties.c /^ cas_property_uint_const = (cas_property_uint | CAS_PROPERTIES_CONST),$/;" e enum:cas_property_type file: +cas_queue_flag_set_unlocked cas_cache/linux_kernel_version.h 599;" d +cas_queue_flag_set_unlocked cas_cache/linux_kernel_version.h 602;" d +cas_reserve_pool cas_cache/utils/utils_rpool.c /^struct cas_reserve_pool {$/;" s file: +cas_rpool_create 
cas_cache/utils/utils_rpool.c /^struct cas_reserve_pool *cas_rpool_create(uint32_t limit, char *name,$/;" f +cas_rpool_del cas_cache/utils/utils_rpool.h /^typedef void (*cas_rpool_del)(void *allocator_ctx, void *item);$/;" t +cas_rpool_destroy cas_cache/utils/utils_rpool.c /^void cas_rpool_destroy(struct cas_reserve_pool *rpool_master,$/;" f +cas_rpool_new cas_cache/utils/utils_rpool.h /^typedef void *(*cas_rpool_new)(void *allocator_ctx, int cpu);$/;" t +cas_rpool_try_get cas_cache/utils/utils_rpool.c /^void *cas_rpool_try_get(struct cas_reserve_pool *rpool_master, int *cpu)$/;" f +cas_rpool_try_put cas_cache/utils/utils_rpool.c /^int cas_rpool_try_put(struct cas_reserve_pool *rpool_master, void *entry, int cpu)$/;" f +cas_service_ioctl_ctrl cas_cache/service_ui_ioctl.c /^long cas_service_ioctl_ctrl(struct file *filp, unsigned int cmd,$/;" f +cas_set_queue_flush_fua cas_cache/linux_kernel_version.h /^static inline void cas_set_queue_flush_fua(struct request_queue *q,$/;" f +cas_stop_cleaner_thread cas_cache/threads.c /^void cas_stop_cleaner_thread(ocf_cleaner_t c)$/;" f +cas_stop_metadata_updater_thread cas_cache/threads.c /^void cas_stop_metadata_updater_thread(ocf_metadata_updater_t mu)$/;" f +cas_stop_queue_thread cas_cache/threads.c /^void cas_stop_queue_thread(ocf_queue_t q)$/;" f +cas_submit_bio cas_cache/linux_kernel_version.h /^static inline blk_qc_t cas_submit_bio(int rw, struct bio *bio)$/;" f +cas_submit_bio cas_cache/linux_kernel_version.h /^static inline void cas_submit_bio(int rw, struct bio *bio)$/;" f +cas_thread_info cas_cache/threads.c /^struct cas_thread_info {$/;" s file: +cas_upgrade cas_cache/layer_upgrade.c /^int cas_upgrade(void)$/;" f +cas_upgrade_check_ctx_visitor cas_cache/layer_upgrade.c /^int cas_upgrade_check_ctx_visitor(ocf_cache_t cache, void *cntx)$/;" f +cas_upgrade_finish cas_cache/layer_upgrade.c /^int cas_upgrade_finish(void)$/;" f +cas_upgrade_get_configuration cas_cache/layer_upgrade.c /^int 
cas_upgrade_get_configuration(void)$/;" f +cas_upgrade_is_in_upgrade cas_cache/layer_upgrade.c /^bool cas_upgrade_is_in_upgrade(void)$/;" f +cas_upgrade_set_pt_and_flush_visitor_core cas_cache/layer_upgrade.c /^int cas_upgrade_set_pt_and_flush_visitor_core(ocf_core_t core, void *cntx)$/;" f +cas_upgrade_verify cas_cache/layer_upgrade.c /^int cas_upgrade_verify(void)$/;" f +cas_vfree cas_cache/utils/utils_gc.c /^void cas_vfree(const void *addr) { vfree(addr); };$/;" f +cas_vfree cas_cache/utils/utils_gc.c /^void cas_vfree(const void *addr)$/;" f +cas_vfree_item cas_cache/utils/utils_gc.c /^struct cas_vfree_item {$/;" s file: +cas_vm_mmap cas_cache/linux_kernel_version.h /^static inline unsigned long cas_vm_mmap(struct file *file,$/;" f +cas_vm_munmap cas_cache/linux_kernel_version.h /^static inline int cas_vm_munmap(unsigned long start, size_t len)$/;" f +casdisk_functions cas_cache/main.c /^struct casdsk_functions_mapper casdisk_functions;$/;" v typeref:struct:casdsk_functions_mapper +casdsk_attribute cas_disk/sysfs.h /^struct casdsk_attribute {$/;" s +casdsk_deinit_disks cas_disk/disk.c /^void casdsk_deinit_disks(void)$/;" f +casdsk_deinit_exp_objs cas_disk/exp_obj.c /^void casdsk_deinit_exp_objs(void)$/;" f +casdsk_deinit_kobjects cas_disk/main.c /^static void __exit casdsk_deinit_kobjects(void)$/;" f file: +casdsk_disk cas_disk/disk.h /^struct casdsk_disk {$/;" s +casdsk_disk_allocate_minors cas_disk/disk.c /^int casdsk_disk_allocate_minors(int count)$/;" f +casdsk_disk_attach cas_cache/cas_cache.h /^ int (*casdsk_disk_attach) (struct casdsk_disk *dsk, struct module *owner,$/;" m struct:casdsk_functions_mapper +casdsk_disk_attach cas_disk/disk.c /^EXPORT_SYMBOL(casdsk_disk_attach);$/;" v +casdsk_disk_attach cas_disk/disk.c /^int casdsk_disk_attach(struct casdsk_disk *dsk, struct module *owner,$/;" f +casdsk_disk_claim cas_cache/cas_cache.h /^ struct casdsk_disk *(*casdsk_disk_claim)(const char *path, void *private);$/;" m struct:casdsk_functions_mapper 
typeref:struct:casdsk_functions_mapper::casdsk_disk_claim +casdsk_disk_claim cas_disk/disk.c /^EXPORT_SYMBOL(casdsk_disk_claim);$/;" v +casdsk_disk_claim cas_disk/disk.c /^struct casdsk_disk *casdsk_disk_claim(const char *path, void *private)$/;" f +casdsk_disk_clear_pt cas_cache/cas_cache.h /^ int (*casdsk_disk_clear_pt)(struct casdsk_disk *dsk);$/;" m struct:casdsk_functions_mapper +casdsk_disk_clear_pt cas_disk/disk.c /^EXPORT_SYMBOL(casdsk_disk_clear_pt);$/;" v +casdsk_disk_clear_pt cas_disk/disk.c /^int casdsk_disk_clear_pt(struct casdsk_disk *dsk)$/;" f +casdsk_disk_close cas_cache/cas_cache.h /^ void (*casdsk_disk_close)(struct casdsk_disk *dsk);$/;" m struct:casdsk_functions_mapper +casdsk_disk_close cas_disk/disk.c /^EXPORT_SYMBOL(casdsk_disk_close);$/;" v +casdsk_disk_close cas_disk/disk.c /^void casdsk_disk_close(struct casdsk_disk *dsk)$/;" f +casdsk_disk_dettach cas_cache/cas_cache.h /^ int (*casdsk_disk_dettach)(struct casdsk_disk *dsk);$/;" m struct:casdsk_functions_mapper +casdsk_disk_dettach cas_disk/disk.c /^EXPORT_SYMBOL(casdsk_disk_dettach);$/;" v +casdsk_disk_dettach cas_disk/disk.c /^int casdsk_disk_dettach(struct casdsk_disk *dsk)$/;" f +casdsk_disk_get_blkdev cas_cache/cas_cache.h /^ struct block_device *(*casdsk_disk_get_blkdev)(struct casdsk_disk *dsk);$/;" m struct:casdsk_functions_mapper typeref:struct:casdsk_functions_mapper::casdsk_disk_get_blkdev +casdsk_disk_get_blkdev cas_disk/disk.c /^EXPORT_SYMBOL(casdsk_disk_get_blkdev);$/;" v +casdsk_disk_get_blkdev cas_disk/disk.c /^struct block_device *casdsk_disk_get_blkdev(struct casdsk_disk *dsk)$/;" f +casdsk_disk_get_gendisk cas_cache/cas_cache.h /^ struct gendisk *(*casdsk_disk_get_gendisk)(struct casdsk_disk *dsk);$/;" m struct:casdsk_functions_mapper typeref:struct:casdsk_functions_mapper::casdsk_disk_get_gendisk +casdsk_disk_get_gendisk cas_disk/disk.c /^EXPORT_SYMBOL(casdsk_disk_get_gendisk);$/;" v +casdsk_disk_get_gendisk cas_disk/disk.c /^struct gendisk 
*casdsk_disk_get_gendisk(struct casdsk_disk *dsk)$/;" f +casdsk_disk_get_queue cas_cache/cas_cache.h /^ struct request_queue *(*casdsk_disk_get_queue)(struct casdsk_disk *dsk);$/;" m struct:casdsk_functions_mapper typeref:struct:casdsk_functions_mapper::casdsk_disk_get_queue +casdsk_disk_get_queue cas_disk/disk.c /^EXPORT_SYMBOL(casdsk_disk_get_queue);$/;" v +casdsk_disk_get_queue cas_disk/disk.c /^struct request_queue *casdsk_disk_get_queue(struct casdsk_disk *dsk)$/;" f +casdsk_disk_in_transition cas_disk/disk.h /^static inline bool casdsk_disk_in_transition(struct casdsk_disk *dsk)$/;" f +casdsk_disk_is_attached cas_disk/disk.h /^static inline bool casdsk_disk_is_attached(struct casdsk_disk *dsk)$/;" f +casdsk_disk_is_pt cas_disk/disk.h /^static inline bool casdsk_disk_is_pt(struct casdsk_disk *dsk)$/;" f +casdsk_disk_is_shutdown cas_disk/disk.h /^static inline bool casdsk_disk_is_shutdown(struct casdsk_disk *dsk)$/;" f +casdsk_disk_is_unknown cas_disk/disk.h /^static inline bool casdsk_disk_is_unknown(struct casdsk_disk *dsk)$/;" f +casdsk_disk_ktype cas_disk/disk.c /^static struct kobj_type casdsk_disk_ktype = {$/;" v typeref:struct:kobj_type file: +casdsk_disk_lock cas_disk/disk.h /^static inline void casdsk_disk_lock(struct casdsk_disk *dsk)$/;" f +casdsk_disk_open cas_cache/cas_cache.h /^ struct casdsk_disk *(*casdsk_disk_open)(const char *path, void *private);$/;" m struct:casdsk_functions_mapper typeref:struct:casdsk_functions_mapper::casdsk_disk_open +casdsk_disk_open cas_disk/disk.c /^EXPORT_SYMBOL(casdsk_disk_open);$/;" v +casdsk_disk_open cas_disk/disk.c /^struct casdsk_disk *casdsk_disk_open(const char *path, void *private)$/;" f +casdsk_disk_set_attached cas_cache/cas_cache.h /^ int (*casdsk_disk_set_attached)(struct casdsk_disk *dsk);$/;" m struct:casdsk_functions_mapper +casdsk_disk_set_attached cas_disk/disk.c /^EXPORT_SYMBOL(casdsk_disk_set_attached);$/;" v +casdsk_disk_set_attached cas_disk/disk.c /^int casdsk_disk_set_attached(struct 
casdsk_disk *dsk)$/;" f +casdsk_disk_set_pt cas_cache/cas_cache.h /^ int (*casdsk_disk_set_pt)(struct casdsk_disk *dsk);$/;" m struct:casdsk_functions_mapper +casdsk_disk_set_pt cas_disk/disk.c /^EXPORT_SYMBOL(casdsk_disk_set_pt);$/;" v +casdsk_disk_set_pt cas_disk/disk.c /^int casdsk_disk_set_pt(struct casdsk_disk *dsk)$/;" f +casdsk_disk_shutdown_all cas_disk/disk.c /^void __exit casdsk_disk_shutdown_all(void)$/;" f +casdsk_disk_unlock cas_disk/disk.h /^static inline void casdsk_disk_unlock(struct casdsk_disk *dsk)$/;" f +casdsk_exit_module cas_disk/main.c /^module_exit(casdsk_exit_module);$/;" v +casdsk_exit_module cas_disk/main.c /^static void __exit casdsk_exit_module(void)$/;" f file: +casdsk_exp_obj cas_disk/exp_obj.h /^struct casdsk_exp_obj {$/;" s +casdsk_exp_obj_activate cas_cache/cas_cache.h /^ int (*casdsk_exp_obj_activate)(struct casdsk_disk *dsk);$/;" m struct:casdsk_functions_mapper +casdsk_exp_obj_activate cas_disk/exp_obj.c /^EXPORT_SYMBOL(casdsk_exp_obj_activate);$/;" v +casdsk_exp_obj_activate cas_disk/exp_obj.c /^int casdsk_exp_obj_activate(struct casdsk_disk *dsk)$/;" f +casdsk_exp_obj_activated cas_cache/cas_cache.h /^ bool (*casdsk_exp_obj_activated)(struct casdsk_disk *ds);$/;" m struct:casdsk_functions_mapper +casdsk_exp_obj_activated cas_disk/exp_obj.c /^EXPORT_SYMBOL(casdsk_exp_obj_activated);$/;" v +casdsk_exp_obj_activated cas_disk/exp_obj.c /^bool casdsk_exp_obj_activated(struct casdsk_disk *dsk)$/;" f +casdsk_exp_obj_alloc cas_disk/exp_obj.c /^static int casdsk_exp_obj_alloc(struct casdsk_disk *dsk)$/;" f file: +casdsk_exp_obj_attach cas_disk/exp_obj.c /^int casdsk_exp_obj_attach(struct casdsk_disk *dsk, struct module *owner,$/;" f +casdsk_exp_obj_create cas_cache/cas_cache.h /^ int (*casdsk_exp_obj_create)(struct casdsk_disk *dsk, const char *dev_name,$/;" m struct:casdsk_functions_mapper +casdsk_exp_obj_create cas_disk/exp_obj.c /^EXPORT_SYMBOL(casdsk_exp_obj_create);$/;" v +casdsk_exp_obj_create cas_disk/exp_obj.c /^int 
casdsk_exp_obj_create(struct casdsk_disk *dsk, const char *dev_name,$/;" f +casdsk_exp_obj_destroy cas_cache/cas_cache.h /^ int (*casdsk_exp_obj_destroy)(struct casdsk_disk *dsk);$/;" m struct:casdsk_functions_mapper +casdsk_exp_obj_destroy cas_disk/exp_obj.c /^EXPORT_SYMBOL(casdsk_exp_obj_destroy);$/;" v +casdsk_exp_obj_destroy cas_disk/exp_obj.c /^int casdsk_exp_obj_destroy(struct casdsk_disk *dsk)$/;" f +casdsk_exp_obj_dettach cas_disk/exp_obj.c /^int casdsk_exp_obj_dettach(struct casdsk_disk *dsk)$/;" f +casdsk_exp_obj_free cas_disk/exp_obj.c /^void casdsk_exp_obj_free(struct casdsk_disk *dsk)$/;" f +casdsk_exp_obj_get_gendisk cas_cache/cas_cache.h /^ struct gendisk *(*casdsk_exp_obj_get_gendisk)(struct casdsk_disk *dsk);$/;" m struct:casdsk_functions_mapper typeref:struct:casdsk_functions_mapper::casdsk_exp_obj_get_gendisk +casdsk_exp_obj_get_gendisk cas_disk/exp_obj.c /^EXPORT_SYMBOL(casdsk_exp_obj_get_gendisk);$/;" v +casdsk_exp_obj_get_gendisk cas_disk/exp_obj.c /^struct gendisk *casdsk_exp_obj_get_gendisk(struct casdsk_disk *dsk)$/;" f +casdsk_exp_obj_get_queue cas_cache/cas_cache.h /^ struct request_queue *(*casdsk_exp_obj_get_queue)(struct casdsk_disk *dsk);$/;" m struct:casdsk_functions_mapper typeref:struct:casdsk_functions_mapper::casdsk_exp_obj_get_queue +casdsk_exp_obj_get_queue cas_disk/exp_obj.c /^EXPORT_SYMBOL(casdsk_exp_obj_get_queue);$/;" v +casdsk_exp_obj_get_queue cas_disk/exp_obj.c /^struct request_queue *casdsk_exp_obj_get_queue(struct casdsk_disk *dsk)$/;" f +casdsk_exp_obj_ktype cas_disk/exp_obj.c /^static struct kobj_type casdsk_exp_obj_ktype = {$/;" v typeref:struct:kobj_type file: +casdsk_exp_obj_lock cas_cache/cas_cache.h /^ int (*casdsk_exp_obj_lock)(struct casdsk_disk *dsk);$/;" m struct:casdsk_functions_mapper +casdsk_exp_obj_lock cas_disk/exp_obj.c /^EXPORT_SYMBOL(casdsk_exp_obj_lock);$/;" v +casdsk_exp_obj_lock cas_disk/exp_obj.c /^int casdsk_exp_obj_lock(struct casdsk_disk *dsk)$/;" f +casdsk_exp_obj_ops cas_disk/cas_disk.h 
/^struct casdsk_exp_obj_ops {$/;" s +casdsk_exp_obj_prepare_attached cas_disk/exp_obj.c /^void casdsk_exp_obj_prepare_attached(struct casdsk_disk *dsk)$/;" f +casdsk_exp_obj_prepare_pt cas_disk/exp_obj.c /^void casdsk_exp_obj_prepare_pt(struct casdsk_disk *dsk)$/;" f +casdsk_exp_obj_prepare_shutdown cas_disk/exp_obj.c /^void casdsk_exp_obj_prepare_shutdown(struct casdsk_disk *dsk)$/;" f +casdsk_exp_obj_pt_io_ctx cas_disk/exp_obj.h /^struct casdsk_exp_obj_pt_io_ctx {$/;" s +casdsk_exp_obj_unlock cas_cache/cas_cache.h /^ int (*casdsk_exp_obj_unlock)(struct casdsk_disk *dsk);$/;" m struct:casdsk_functions_mapper +casdsk_exp_obj_unlock cas_disk/exp_obj.c /^EXPORT_SYMBOL(casdsk_exp_obj_unlock);$/;" v +casdsk_exp_obj_unlock cas_disk/exp_obj.c /^int casdsk_exp_obj_unlock(struct casdsk_disk *dsk)$/;" f +casdsk_free_stored_config cas_cache/cas_cache.h /^ void (*casdsk_free_stored_config)(void);$/;" m struct:casdsk_functions_mapper +casdsk_free_stored_config cas_disk/main.c /^EXPORT_SYMBOL(casdsk_free_stored_config);$/;" v +casdsk_free_stored_config cas_disk/main.c /^void casdsk_free_stored_config(void)$/;" f +casdsk_functions_mapper cas_cache/cas_cache.h /^struct casdsk_functions_mapper {$/;" s +casdsk_get_stored_config cas_cache/cas_cache.h /^ size_t (*casdsk_get_stored_config)(struct casdsk_props_conf **blobs);$/;" m struct:casdsk_functions_mapper +casdsk_get_stored_config cas_disk/main.c /^EXPORT_SYMBOL(casdsk_get_stored_config);$/;" v +casdsk_get_stored_config cas_disk/main.c /^size_t casdsk_get_stored_config(struct casdsk_props_conf **blobs)$/;" f +casdsk_get_version cas_cache/cas_cache.h /^ uint32_t (*casdsk_get_version)(void);$/;" m struct:casdsk_functions_mapper +casdsk_get_version cas_disk/main.c /^EXPORT_SYMBOL(casdsk_get_version);$/;" v +casdsk_get_version cas_disk/main.c /^uint32_t casdsk_get_version(void)$/;" f +casdsk_init_disks cas_disk/disk.c /^int __init casdsk_init_disks(void)$/;" f +casdsk_init_exp_objs cas_disk/exp_obj.c /^int __init 
casdsk_init_exp_objs(void)$/;" f +casdsk_init_kobjects cas_disk/main.c /^static int __init casdsk_init_kobjects(void)$/;" f file: +casdsk_init_module cas_disk/main.c /^module_init(casdsk_init_module);$/;" v +casdsk_init_module cas_disk/main.c /^static int __init casdsk_init_module(void)$/;" f file: +casdsk_kobj_to_disk cas_disk/disk.h /^static inline struct casdsk_disk *casdsk_kobj_to_disk(struct kobject *kobj)$/;" f +casdsk_kobj_to_exp_obj cas_disk/exp_obj.h /^static inline struct casdsk_exp_obj *casdsk_kobj_to_exp_obj(struct kobject *kobj)$/;" f +casdsk_module cas_disk/cas_disk_defs.h /^struct casdsk_module {$/;" s +casdsk_module cas_disk/main.c /^struct casdsk_module *casdsk_module;$/;" v typeref:struct:casdsk_module +casdsk_props_conf cas_disk/cas_disk.h /^struct casdsk_props_conf {$/;" s +casdsk_store_config cas_cache/cas_cache.h /^ void (*casdsk_store_config)(size_t n_blobs, struct casdsk_props_conf *blobs);$/;" m struct:casdsk_functions_mapper +casdsk_store_config cas_disk/main.c /^EXPORT_SYMBOL(casdsk_store_config);$/;" v +casdsk_store_config cas_disk/main.c /^void casdsk_store_config(size_t n_blobs, struct casdsk_props_conf *blobs)$/;" f +casdsk_stored_config cas_disk/cas_disk_defs.h /^struct casdsk_stored_config {$/;" s +casdsk_sysfs_ops cas_disk/sysfs.c /^const struct sysfs_ops casdsk_sysfs_ops = {$/;" v typeref:struct:sysfs_ops +cdev cas_cache/control.c /^ struct cdev cdev;$/;" m struct:cas_ctrl_device typeref:struct:cas_ctrl_device::cdev file: +check_cflag config.mk /^check_cflag=$(shell echo "" | \\$/;" m +check_header config.mk /^check_header=$(shell echo "\\#include <${1}>" | \\$/;" m +class cas_cache/control.c /^ struct class *class;$/;" m struct:cas_ctrl_device typeref:struct:cas_ctrl_device::class file: +class_id include/cas_ioctl_codes.h /^ uint32_t class_id;$/;" m struct:kcas_io_class +clean_shutdown include/cas_ioctl_codes.h /^ bool clean_shutdown;$/;" m struct:kcas_cache_check_device +cleanup_queue cas_disk/cas_disk.h /^ void 
(*cleanup_queue)(struct casdsk_disk *dsk, struct request_queue *q,$/;" m struct:casdsk_exp_obj_ops +close_bdev_exclusive cas_disk/cas_disk_defs.h /^static inline void close_bdev_exclusive(struct block_device *bdev, fmode_t mode)$/;" f +cmd cas_cache/object/obj_atomic_dev_bottom.c /^ struct nvme_command cmd;$/;" m struct:cas_atomic_io typeref:struct:cas_atomic_io::nvme_command file: +cmpl cas_cache/utils/utils_rpool.c /^ struct completion cmpl;$/;" m struct:_cas_rpool_pre_alloc_info typeref:struct:_cas_rpool_pre_alloc_info::completion file: +cmpl_context cas_cache/object/obj_atomic_dev_bottom.c /^ void *cmpl_context;$/;" m struct:cas_atomic_io file: +cmpl_fn cas_cache/object/obj_atomic_dev_bottom.c /^ ocf_end_io_t cmpl_fn;$/;" m struct:cas_atomic_io file: +cmpl_work cas_cache/object/obj_atomic_dev_bottom.c /^ struct work_struct cmpl_work;$/;" m struct:cas_atomic_write_zero_ctx typeref:struct:cas_atomic_write_zero_ctx::work_struct file: +compl cas_cache/threads.c /^ struct completion compl;$/;" m struct:cas_thread_info typeref:struct:cas_thread_info::completion file: +config cas_disk/cas_disk_defs.h /^ struct casdsk_stored_config config;$/;" m struct:casdsk_module typeref:struct:casdsk_module::casdsk_stored_config +core_id include/cas_ioctl_codes.h /^ ocf_core_id_t core_id; \/**< id core object to be removed *\/$/;" m struct:kcas_flush_core +core_id include/cas_ioctl_codes.h /^ ocf_core_id_t core_id; \/**< id core object to be removed *\/$/;" m struct:kcas_remove_core +core_id include/cas_ioctl_codes.h /^ ocf_core_id_t core_id; \/**< id core object to be removed *\/$/;" m struct:kcas_reset_stats +core_id include/cas_ioctl_codes.h /^ ocf_core_id_t core_id; \/**< id of newely inserted core object *\/$/;" m struct:kcas_insert_core +core_id include/cas_ioctl_codes.h /^ ocf_core_id_t core_id;$/;" m struct:kcas_core_info +core_id include/cas_ioctl_codes.h /^ ocf_core_id_t core_id;$/;" m struct:kcas_get_core_param +core_id include/cas_ioctl_codes.h /^ ocf_core_id_t 
core_id;$/;" m struct:kcas_io_class +core_id include/cas_ioctl_codes.h /^ ocf_core_id_t core_id;$/;" m struct:kcas_set_core_param +core_id include/cas_ioctl_codes.h /^ ocf_core_id_t core_id[OCF_CORE_MAX];$/;" m struct:kcas_cache_info +core_param_id_max include/cas_ioctl_codes.h /^ core_param_id_max,$/;" e enum:kcas_core_param_id +core_param_seq_cutoff_policy include/cas_ioctl_codes.h /^ core_param_seq_cutoff_policy,$/;" e enum:kcas_core_param_id +core_param_seq_cutoff_threshold include/cas_ioctl_codes.h /^ core_param_seq_cutoff_threshold,$/;" e enum:kcas_core_param_id +core_path_name include/cas_ioctl_codes.h /^ char core_path_name[MAX_STR_LEN]; \/**< path to a core object *\/$/;" m struct:kcas_core_pool_remove +core_path_name include/cas_ioctl_codes.h /^ char core_path_name[MAX_STR_LEN]; \/**< path to a core object *\/$/;" m struct:kcas_insert_core +core_path_name include/cas_ioctl_codes.h /^ char core_path_name[MAX_STR_LEN];$/;" m struct:kcas_core_info +core_path_name_tab cas_cache/layer_cache_management.c /^ char *core_path_name_tab;$/;" m struct:get_paths_ctx file: +core_path_tab include/cas_ioctl_codes.h /^ char *core_path_tab;$/;" m struct:kcas_core_pool_path +core_pool_count include/cas_ioctl_codes.h /^ int core_pool_count;$/;" m struct:kcas_core_pool_count +core_pool_count include/cas_ioctl_codes.h /^ int core_pool_count;$/;" m struct:kcas_core_pool_path +count cas_cache/object/obj_atomic_dev_bottom.c /^ uint32_t count;$/;" m struct:cas_atomic_io file: +count cas_cache/ocf_env.c /^ atomic_t count;$/;" m struct:_env_allocator file: +count cas_cache/ocf_env.h /^ atomic_t count;$/;" m struct:__anon1 +count cas_cache/utils/utils_rpool.c /^ atomic_t count;$/;" m struct:_cas_reserve_pool_per_cpu file: +cpu cas_cache/ocf_env.c /^ uint32_t cpu;$/;" m struct:_env_allocator_item file: +crc cas_disk/cas_disk.h /^ uint16_t crc;$/;" m struct:casdsk_props_conf +ctx_cfg cas_cache/context.c /^static const struct ocf_ctx_config ctx_cfg = {$/;" v 
typeref:struct:ocf_ctx_config file: +data cas_cache/object/obj_atomic_dev_bottom.c /^ struct blk_data *data;$/;" m struct:cas_atomic_io typeref:struct:cas_atomic_io::blk_data file: +data cas_cache/object/obj_blk_utils.h /^ struct blk_data *data; \/* IO data buffer *\/$/;" m struct:blkio typeref:struct:blkio::blk_data +data cas_cache/ocf_env.c /^ char data[];$/;" m struct:_env_allocator_item file: +detach include/cas_ioctl_codes.h /^ bool detach; \/**< detach core without removing it from cache metadata *\/$/;" m struct:kcas_remove_core +dev cas_cache/control.c /^ dev_t dev;$/;" m struct:cas_ctrl_device file: +dev_name cas_disk/exp_obj.h /^ const char *dev_name;$/;" m struct:casdsk_exp_obj +device_path_name include/cas_ioctl_codes.h /^ char device_path_name[MAX_STR_LEN]; \/**< path to NVMe device*\/$/;" m struct:kcas_nvme_format +dir cas_cache/object/obj_atomic_dev_bottom.c /^ unsigned dir:1;$/;" m struct:cas_atomic_io file: +dir cas_cache/object/obj_blk_utils.h /^ int32_t dir;$/;" m struct:blkio +dirty cas_cache/object/obj_blk_utils.h /^ int32_t dirty;$/;" m struct:blkio +discard cas_cache/object/obj_atomic_dev_bottom.c /^ unsigned discard:1;$/;" m struct:cas_atomic_io file: +disk_cache cas_disk/cas_disk_defs.h /^ struct kmem_cache *disk_cache;$/;" m struct:casdsk_module typeref:struct:casdsk_module::kmem_cache +disk_list cas_disk/cas_disk_defs.h /^ struct list_head disk_list;$/;" m struct:casdsk_module typeref:struct:casdsk_module::list_head +disk_major cas_disk/cas_disk_defs.h /^ int disk_major;$/;" m struct:casdsk_module +dry_run cas_cache/main.c /^u32 dry_run;$/;" v +dsk cas_cache/object/obj_blk.h /^ struct casdsk_disk *dsk;$/;" m struct:bd_volume typeref:struct:bd_volume::casdsk_disk +dsk cas_disk/exp_obj.h /^ struct casdsk_disk *dsk;$/;" m struct:casdsk_exp_obj_pt_io_ctx typeref:struct:casdsk_exp_obj_pt_io_ctx::casdsk_disk +end cas_cache/object/obj_atomic_dev_bottom.c /^ uint32_t end;$/;" m struct:cas_atomic_io file: +entry_size cas_cache/utils/utils_rpool.c 
/^ uint32_t entry_size;$/;" m struct:cas_reserve_pool file: +env_allocator cas_cache/ocf_env.h /^typedef struct _env_allocator env_allocator;$/;" t typeref:struct:_env_allocator +env_allocator_align cas_cache/ocf_env.c /^static inline size_t env_allocator_align(size_t size)$/;" f file: +env_allocator_create cas_cache/ocf_env.c /^env_allocator *env_allocator_create(uint32_t size, const char *name)$/;" f +env_allocator_del cas_cache/ocf_env.c /^void env_allocator_del(env_allocator *allocator, void *obj)$/;" f +env_allocator_del_rpool cas_cache/ocf_env.c /^void env_allocator_del_rpool(void *allocator_ctx, void *item)$/;" f +env_allocator_destroy cas_cache/ocf_env.c /^void env_allocator_destroy(env_allocator *allocator)$/;" f +env_allocator_item_count cas_cache/ocf_env.c /^uint32_t env_allocator_item_count(env_allocator *allocator)$/;" f +env_allocator_new cas_cache/ocf_env.c /^void *env_allocator_new(env_allocator *allocator)$/;" f +env_allocator_new_rpool cas_cache/ocf_env.c /^void *env_allocator_new_rpool(void *allocator_ctx, int cpu)$/;" f +env_atomic cas_cache/ocf_env.h /^typedef atomic_t env_atomic;$/;" t +env_atomic64 cas_cache/ocf_env.h /^typedef atomic64_t env_atomic64;$/;" t +env_atomic64_add cas_cache/ocf_env.h /^static inline void env_atomic64_add(u64 i, env_atomic64 *a)$/;" f +env_atomic64_cmpxchg cas_cache/ocf_env.h /^static inline u64 env_atomic64_cmpxchg(atomic64_t *a, u64 old, u64 new)$/;" f +env_atomic64_dec cas_cache/ocf_env.h /^static inline void env_atomic64_dec(env_atomic64 *a)$/;" f +env_atomic64_inc cas_cache/ocf_env.h /^static inline void env_atomic64_inc(env_atomic64 *a)$/;" f +env_atomic64_inc_return cas_cache/ocf_env.h /^static inline u64 env_atomic64_inc_return(env_atomic64 *a)$/;" f +env_atomic64_read cas_cache/ocf_env.h /^static inline u64 env_atomic64_read(const env_atomic64 *a)$/;" f +env_atomic64_set cas_cache/ocf_env.h /^static inline void env_atomic64_set(env_atomic64 *a, u64 i)$/;" f +env_atomic64_sub cas_cache/ocf_env.h /^static 
inline void env_atomic64_sub(u64 i, env_atomic64 *a)$/;" f +env_atomic_add cas_cache/ocf_env.h /^static inline void env_atomic_add(int i, env_atomic *a)$/;" f +env_atomic_add_return cas_cache/ocf_env.h /^static inline int env_atomic_add_return(int i, env_atomic *a)$/;" f +env_atomic_add_unless cas_cache/ocf_env.h /^static inline int env_atomic_add_unless(env_atomic *a, int i, int u)$/;" f +env_atomic_cmpxchg cas_cache/ocf_env.h /^static inline int env_atomic_cmpxchg(env_atomic *a, int old, int new_value)$/;" f +env_atomic_dec cas_cache/ocf_env.h /^static inline void env_atomic_dec(env_atomic *a)$/;" f +env_atomic_dec_and_test cas_cache/ocf_env.h /^static inline bool env_atomic_dec_and_test(env_atomic *a)$/;" f +env_atomic_dec_return cas_cache/ocf_env.h /^static inline int env_atomic_dec_return(env_atomic *a)$/;" f +env_atomic_inc cas_cache/ocf_env.h /^static inline void env_atomic_inc(env_atomic *a)$/;" f +env_atomic_inc_and_test cas_cache/ocf_env.h /^static inline bool env_atomic_inc_and_test(env_atomic *a)$/;" f +env_atomic_inc_return cas_cache/ocf_env.h /^static inline int env_atomic_inc_return(env_atomic *a)$/;" f +env_atomic_read cas_cache/ocf_env.h /^static inline int env_atomic_read(const env_atomic *a)$/;" f +env_atomic_set cas_cache/ocf_env.h /^static inline void env_atomic_set(env_atomic *a, int i)$/;" f +env_atomic_sub cas_cache/ocf_env.h /^static inline void env_atomic_sub(int i, env_atomic *a)$/;" f +env_atomic_sub_and_test cas_cache/ocf_env.h /^static inline bool env_atomic_sub_and_test(int i, env_atomic *a)$/;" f +env_atomic_sub_return cas_cache/ocf_env.h /^static inline int env_atomic_sub_return(int i, env_atomic *a)$/;" f +env_bit_clear cas_cache/ocf_env.h /^static inline void env_bit_clear(int nr, volatile void *addr)$/;" f +env_bit_set cas_cache/ocf_env.h /^static inline void env_bit_set(int nr, volatile void *addr)$/;" f +env_bit_test cas_cache/ocf_env.h /^static inline int env_bit_test(int nr, const void *addr)$/;" f +env_completion 
cas_cache/ocf_env.h /^typedef struct completion env_completion;$/;" t typeref:struct:completion +env_completion_complete cas_cache/ocf_env.h /^static inline void env_completion_complete(env_completion *completion)$/;" f +env_completion_init cas_cache/ocf_env.h /^static inline void env_completion_init(env_completion *completion)$/;" f +env_completion_wait cas_cache/ocf_env.h /^static inline void env_completion_wait(env_completion *completion)$/;" f +env_cond_resched cas_cache/ocf_env.h /^static inline void env_cond_resched(void)$/;" f +env_crc32 cas_cache/ocf_env.h /^static inline uint32_t env_crc32(uint32_t crc, uint8_t const *data, size_t len)$/;" f +env_free cas_cache/ocf_env.h /^static inline void env_free(const void *ptr)$/;" f +env_get_free_memory cas_cache/ocf_env.h /^static inline uint64_t env_get_free_memory(void)$/;" f +env_get_tick_count cas_cache/ocf_env.h /^static inline uint64_t env_get_tick_count(void)$/;" f +env_in_interrupt cas_cache/ocf_env.h /^static inline int env_in_interrupt(void)$/;" f +env_malloc cas_cache/ocf_env.h /^static inline void *env_malloc(size_t size, int flags)$/;" f +env_memcmp cas_cache/ocf_env.h 549;" d +env_memcpy cas_cache/ocf_env.h 545;" d +env_memset cas_cache/ocf_env.h 541;" d +env_msleep cas_cache/ocf_env.h /^static inline void env_msleep(uint64_t n)$/;" f +env_mutex cas_cache/ocf_env.h /^typedef struct mutex env_mutex;$/;" t typeref:struct:mutex +env_mutex_init cas_cache/ocf_env.h /^static inline int env_mutex_init(env_mutex *mutex)$/;" f +env_mutex_is_locked cas_cache/ocf_env.h /^static inline int env_mutex_is_locked(env_mutex *mutex)$/;" f +env_mutex_lock cas_cache/ocf_env.h /^static inline void env_mutex_lock(env_mutex *mutex)$/;" f +env_mutex_lock_interruptible cas_cache/ocf_env.h /^static inline int env_mutex_lock_interruptible(env_mutex *mutex)$/;" f +env_mutex_trylock cas_cache/ocf_env.h /^static inline int env_mutex_trylock(env_mutex *mutex)$/;" f +env_mutex_unlock cas_cache/ocf_env.h /^static inline void 
env_mutex_unlock(env_mutex *mutex)$/;" f +env_rmutex cas_cache/ocf_env.h /^} env_rmutex;$/;" t typeref:struct:__anon1 +env_rmutex_init cas_cache/ocf_env.h /^static inline int env_rmutex_init(env_rmutex *rmutex)$/;" f +env_rmutex_is_locked cas_cache/ocf_env.h /^static inline int env_rmutex_is_locked(env_rmutex *rmutex)$/;" f +env_rmutex_lock cas_cache/ocf_env.h /^static inline void env_rmutex_lock(env_rmutex *rmutex)$/;" f +env_rmutex_lock_interruptible cas_cache/ocf_env.h /^static inline int env_rmutex_lock_interruptible(env_rmutex *rmutex)$/;" f +env_rmutex_trylock cas_cache/ocf_env.h /^static inline int env_rmutex_trylock(env_rmutex *rmutex)$/;" f +env_rmutex_unlock cas_cache/ocf_env.h /^static inline void env_rmutex_unlock(env_rmutex *rmutex)$/;" f +env_rwlock cas_cache/ocf_env.h /^typedef rwlock_t env_rwlock;$/;" t +env_rwlock_init cas_cache/ocf_env.h /^static inline void env_rwlock_init(env_rwlock *l)$/;" f +env_rwlock_read_lock cas_cache/ocf_env.h /^static inline void env_rwlock_read_lock(env_rwlock *l)$/;" f +env_rwlock_read_unlock cas_cache/ocf_env.h /^static inline void env_rwlock_read_unlock(env_rwlock *l)$/;" f +env_rwlock_write_lock cas_cache/ocf_env.h /^static inline void env_rwlock_write_lock(env_rwlock *l)$/;" f +env_rwlock_write_unlock cas_cache/ocf_env.h /^static inline void env_rwlock_write_unlock(env_rwlock *l)$/;" f +env_rwsem cas_cache/ocf_env.h /^} env_rwsem;$/;" t typeref:struct:__anon2 +env_rwsem_down_read cas_cache/ocf_env.h /^static inline void env_rwsem_down_read(env_rwsem *s)$/;" f +env_rwsem_down_read_interruptible cas_cache/ocf_env.h /^static inline int env_rwsem_down_read_interruptible(env_rwsem *s)$/;" f +env_rwsem_down_read_trylock cas_cache/ocf_env.h /^static inline int env_rwsem_down_read_trylock(env_rwsem *s)$/;" f +env_rwsem_down_write cas_cache/ocf_env.h /^static inline void env_rwsem_down_write(env_rwsem *s)$/;" f +env_rwsem_down_write_interruptible cas_cache/ocf_env.h /^static inline int 
env_rwsem_down_write_interruptible(env_rwsem *s)$/;" f +env_rwsem_down_write_trylock cas_cache/ocf_env.h /^static inline int env_rwsem_down_write_trylock(env_rwsem *s)$/;" f +env_rwsem_init cas_cache/ocf_env.h /^static inline int env_rwsem_init(env_rwsem *s)$/;" f +env_rwsem_is_locked cas_cache/ocf_env.h /^static inline int env_rwsem_is_locked(env_rwsem *s)$/;" f +env_rwsem_up_read cas_cache/ocf_env.h /^static inline void env_rwsem_up_read(env_rwsem *s)$/;" f +env_rwsem_up_write cas_cache/ocf_env.h /^static inline void env_rwsem_up_write(env_rwsem *s)$/;" f +env_secs_to_ticks cas_cache/ocf_env.h /^static inline uint64_t env_secs_to_ticks(uint64_t j)$/;" f +env_sort cas_cache/ocf_env.c /^void env_sort(void *base, size_t num, size_t size,$/;" f +env_sort_generic_swap cas_cache/ocf_env.c /^static void env_sort_generic_swap(void *a, void *b, int size)$/;" f file: +env_sort_is_aligned cas_cache/ocf_env.c /^static int env_sort_is_aligned(const void *base, int align)$/;" f file: +env_sort_u32_swap cas_cache/ocf_env.c /^static void env_sort_u32_swap(void *a, void *b, int size)$/;" f file: +env_sort_u64_swap cas_cache/ocf_env.c /^static void env_sort_u64_swap(void *a, void *b, int size)$/;" f file: +env_spinlock cas_cache/ocf_env.h /^typedef spinlock_t env_spinlock;$/;" t +env_spinlock_init cas_cache/ocf_env.h /^static inline void env_spinlock_init(env_spinlock *l)$/;" f +env_spinlock_lock cas_cache/ocf_env.h /^static inline void env_spinlock_lock(env_spinlock *l)$/;" f +env_spinlock_lock_irq cas_cache/ocf_env.h /^static inline void env_spinlock_lock_irq(env_spinlock *l)$/;" f +env_spinlock_lock_irqsave cas_cache/ocf_env.h 422;" d +env_spinlock_unlock cas_cache/ocf_env.h /^static inline void env_spinlock_unlock(env_spinlock *l)$/;" f +env_spinlock_unlock_irq cas_cache/ocf_env.h /^static inline void env_spinlock_unlock_irq(env_spinlock *l)$/;" f +env_spinlock_unlock_irqrestore cas_cache/ocf_env.h 425;" d +env_strdup cas_cache/ocf_env.h 553;" d +env_strncmp 
cas_cache/ocf_env.h 555;" d +env_strncpy cas_cache/ocf_env.h 556;" d +env_strnlen cas_cache/ocf_env.h 554;" d +env_ticks_to_msecs cas_cache/ocf_env.h /^static inline uint64_t env_ticks_to_msecs(uint64_t j)$/;" f +env_ticks_to_nsecs cas_cache/ocf_env.h /^static inline uint64_t env_ticks_to_nsecs(uint64_t j)$/;" f +env_ticks_to_secs cas_cache/ocf_env.h /^static inline uint64_t env_ticks_to_secs(uint64_t j)$/;" f +env_time_after cas_cache/ocf_env.h /^static inline bool env_time_after(uint64_t a, uint64_t b)$/;" f +env_vfree cas_cache/ocf_env.h /^static inline void env_vfree(const void *ptr)$/;" f +env_vmalloc cas_cache/ocf_env.h /^static inline void *env_vmalloc(size_t size)$/;" f +env_vzalloc cas_cache/ocf_env.h /^static inline void *env_vzalloc(size_t size)$/;" f +env_waitqueue cas_cache/ocf_env.h /^typedef wait_queue_head_t env_waitqueue;$/;" t +env_waitqueue_init cas_cache/ocf_env.h /^static inline void env_waitqueue_init(env_waitqueue *w)$/;" f +env_waitqueue_wait cas_cache/ocf_env.h 471;" d +env_waitqueue_wake_up cas_cache/ocf_env.h /^static inline void env_waitqueue_wake_up(env_waitqueue *w)$/;" f +env_zalloc cas_cache/ocf_env.h /^static inline void *env_zalloc(size_t size, int flags)$/;" f +error cas_cache/context.h /^ int error;$/;" m struct:blk_data +error cas_cache/layer_upgrade.c /^ int error;$/;" m struct:_cas_cache_dump_conf_visitor_ctx file: +error cas_cache/layer_upgrade.c /^ int error;$/;" m struct:_cas_upgrade_dump_io_class_visit_ctx file: +error cas_cache/layer_upgrade.c /^ int error;$/;" m struct:_ocf_core_visitor_ctx file: +error cas_cache/object/obj_atomic_dev_bottom.c /^ int error;$/;" m struct:cas_atomic_io file: +error cas_cache/object/obj_blk_utils.h /^ int error;$/;" m struct:blkio +error cas_cache/utils/utils_rpool.c /^ int error;$/;" m struct:_cas_rpool_pre_alloc_info file: +eviction_policy include/cas_ioctl_codes.h /^ ocf_eviction_t eviction_policy;$/;" m struct:kcas_start_cache +exp_obj cas_disk/disk.h /^ struct casdsk_exp_obj 
*exp_obj;$/;" m struct:casdsk_disk typeref:struct:casdsk_disk::casdsk_exp_obj +exp_obj_cache cas_disk/cas_disk_defs.h /^ struct kmem_cache *exp_obj_cache;$/;" m struct:casdsk_module typeref:struct:casdsk_module::kmem_cache +expobj_locked cas_cache/object/obj_blk.h /^ uint32_t expobj_locked : 1;$/;" m struct:bd_volume +expobj_valid cas_cache/object/obj_blk.h /^ uint32_t expobj_valid : 1;$/;" m struct:bd_volume +exported_symbol cas_cache/main.c /^struct exported_symbol {$/;" s file: +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_cache_check_device +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_cache_count +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_cache_info +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_cache_list +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_capabilites +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_core_info +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_core_pool_count +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_core_pool_path +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_core_pool_remove +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_flush_cache +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_flush_core +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_get_cache_param +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_get_core_param +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_insert_core +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_interrupt_flushing +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_io_class +ext_err_code 
include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_io_classes +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_nvme_format +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_remove_core +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_reset_stats +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_set_cache_param +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_set_cache_state +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_set_core_param +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_start_cache +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_stop_cache +ext_err_code include/cas_ioctl_codes.h /^ int ext_err_code;$/;" m struct:kcas_upgrade +find_lbaf cas_cache/utils/utils_nvme.c /^static inline int find_lbaf(struct nvme_lbaf *lbaf, int cnt, int atomic)$/;" f file: +flags cas_cache/object/obj_atomic_dev_bottom.c /^ unsigned long flags;$/;" m struct:cas_atomic_io file: +flags cas_cache/ocf_env.c /^ uint32_t flags;$/;" m struct:_env_allocator_item file: +flush_data include/cas_ioctl_codes.h /^ uint8_t flush_data; \/**< should data be flushed? *\/$/;" m struct:kcas_set_cache_state +flush_data include/cas_ioctl_codes.h /^ uint8_t flush_data; \/**< should data be flushed? *\/$/;" m struct:kcas_start_cache +flush_data include/cas_ioctl_codes.h /^ uint8_t flush_data; \/**< should data be flushed? *\/$/;" m struct:kcas_stop_cache +force include/cas_ioctl_codes.h /^ int force;$/;" m struct:kcas_nvme_format +force include/cas_ioctl_codes.h /^ uint8_t force; \/**< should force option be used? 
*\/$/;" m struct:kcas_start_cache +force_no_flush include/cas_ioctl_codes.h /^ bool force_no_flush; \/**< remove core without flushing *\/$/;" m struct:kcas_remove_core +format_atomic include/cas_ioctl_codes.h /^ bool format_atomic;$/;" m struct:kcas_cache_check_device +freed cas_cache/utils/utils_gc.c /^static atomic_t freed = ATOMIC_INIT(0);$/;" v file: +gd cas_disk/exp_obj.h /^ struct gendisk *gd;$/;" m struct:casdsk_exp_obj typeref:struct:casdsk_exp_obj::gendisk +gd_flags cas_disk/disk.h /^ int gd_flags;$/;" m struct:casdsk_disk +gd_minors cas_disk/disk.h /^ int gd_minors;$/;" m struct:casdsk_disk +get_paths_ctx cas_cache/layer_cache_management.c /^struct get_paths_ctx {$/;" s file: +get_starting_vec cas_cache/utils/utils_data.c /^static int get_starting_vec(struct bio_vec *vecs, uint64_t vecs_num,$/;" f file: +get_stats include/cas_ioctl_codes.h /^ uint8_t get_stats;$/;" m struct:kcas_io_class +holder cas_cache/ocf_env.h /^ struct task_struct *holder;$/;" m struct:__anon1 typeref:struct:__anon1::task_struct +i cas_cache/layer_upgrade.c /^ int i;$/;" m struct:_cas_cache_dump_conf_visitor_ctx file: +i cas_cache/layer_upgrade.c /^ int i;$/;" m struct:_ocf_core_visitor_ctx file: +id cas_disk/disk.h /^ uint32_t id;$/;" m struct:casdsk_disk +id_position include/cas_ioctl_codes.h /^ uint32_t id_position;$/;" m struct:kcas_cache_list +idx cas_cache/context.h /^ uint32_t idx;$/;" m struct:bio_vec_iter +iface_version cas_disk/main.c /^static int iface_version = CASDSK_IFACE_VERSION;$/;" v file: +in_out_num include/cas_ioctl_codes.h /^ uint32_t in_out_num;$/;" m struct:kcas_cache_list +in_upgrade cas_cache/main.c /^bool in_upgrade;$/;" v +info include/cas_ioctl_codes.h /^ struct ocf_cache_info info;$/;" m struct:kcas_cache_info typeref:struct:kcas_cache_info::ocf_cache_info +info include/cas_ioctl_codes.h /^ struct ocf_io_class_info info;$/;" m struct:kcas_io_class typeref:struct:kcas_io_class::ocf_io_class_info +info include/cas_ioctl_codes.h /^ struct ocf_io_class_info 
info[OCF_IO_CLASS_MAX];$/;" m struct:kcas_io_classes typeref:struct:kcas_io_classes::ocf_io_class_info +init_cache include/cas_ioctl_codes.h /^ uint8_t init_cache;$/;" m struct:kcas_start_cache +io cas_cache/context.h /^ struct ocf_io *io;$/;" m struct:blk_data typeref:struct:blk_data::ocf_io +io_class_counter cas_cache/layer_upgrade.c /^ uint32_t io_class_counter;$/;" m struct:_cas_upgrade_dump_io_class_visit_ctx file: +io_class_info2cfg cas_cache/layer_cache_management.c /^static inline void io_class_info2cfg(ocf_part_id_t part_id,$/;" f file: +ioctl cas_disk/cas_disk.h /^ int (*ioctl)(struct casdsk_disk *dsk, unsigned int cmd, unsigned long arg,$/;" m struct:casdsk_exp_obj_ops +is_atomic_capable cas_cache/object/obj_atomic_dev_bottom.h /^ unsigned is_atomic_capable : 1;$/;" m struct:atomic_dev_params +is_cache_device include/cas_ioctl_codes.h /^ bool is_cache_device;$/;" m struct:kcas_cache_check_device +is_mode_optimal cas_cache/object/obj_atomic_dev_bottom.h /^ unsigned is_mode_optimal : 1;$/;" m struct:atomic_dev_params +is_rq_type_fs cas_cache/linux_kernel_version.h /^static inline int is_rq_type_fs(struct request *rq)$/;" f +item cas_cache/utils/utils_properties.c /^ struct list_head item;$/;" m struct:_cas_property typeref:struct:_cas_property::list_head file: +item_size cas_cache/ocf_env.c /^ uint32_t item_size;$/;" m struct:_env_allocator file: +iter cas_cache/context.h /^ struct bio_vec_iter iter;$/;" m struct:blk_data typeref:struct:blk_data::bio_vec_iter +iter cas_cache/object/obj_atomic_dev_bottom.c /^ struct bio_vec_iter iter;$/;" m struct:cas_atomic_io typeref:struct:cas_atomic_io::bio_vec_iter file: +iter cas_cache/object/obj_blk_utils.h /^ struct bio_vec_iter iter;$/;" m struct:blkio typeref:struct:blkio::bio_vec_iter +ivec cas_cache/context.h /^ struct bio_vec *ivec;$/;" m struct:bio_vec_iter typeref:struct:bio_vec_iter::bio_vec +kcas_cache_check_device include/cas_ioctl_codes.h /^struct kcas_cache_check_device {$/;" s +kcas_cache_count 
include/cas_ioctl_codes.h /^struct kcas_cache_count {$/;" s +kcas_cache_info include/cas_ioctl_codes.h /^struct kcas_cache_info {$/;" s +kcas_cache_list include/cas_ioctl_codes.h /^struct kcas_cache_list {$/;" s +kcas_cache_param_id include/cas_ioctl_codes.h /^enum kcas_cache_param_id {$/;" g +kcas_capabilites include/cas_ioctl_codes.h /^struct kcas_capabilites {$/;" s +kcas_core_info include/cas_ioctl_codes.h /^struct kcas_core_info {$/;" s +kcas_core_param_id include/cas_ioctl_codes.h /^enum kcas_core_param_id {$/;" g +kcas_core_pool_count include/cas_ioctl_codes.h /^struct kcas_core_pool_count {$/;" s +kcas_core_pool_path include/cas_ioctl_codes.h /^struct kcas_core_pool_path {$/;" s +kcas_core_pool_remove include/cas_ioctl_codes.h /^struct kcas_core_pool_remove {$/;" s +kcas_error include/cas_ioctl_codes.h /^enum kcas_error {$/;" g +kcas_flush_cache include/cas_ioctl_codes.h /^struct kcas_flush_cache {$/;" s +kcas_flush_core include/cas_ioctl_codes.h /^struct kcas_flush_core {$/;" s +kcas_get_cache_param include/cas_ioctl_codes.h /^struct kcas_get_cache_param {$/;" s +kcas_get_core_param include/cas_ioctl_codes.h /^struct kcas_get_core_param {$/;" s +kcas_insert_core include/cas_ioctl_codes.h /^struct kcas_insert_core {$/;" s +kcas_interrupt_flushing include/cas_ioctl_codes.h /^struct kcas_interrupt_flushing {$/;" s +kcas_io_class include/cas_ioctl_codes.h /^struct kcas_io_class {$/;" s +kcas_io_classes include/cas_ioctl_codes.h /^struct kcas_io_classes {$/;" s +kcas_nvme_format include/cas_ioctl_codes.h /^struct kcas_nvme_format {$/;" s +kcas_remove_core include/cas_ioctl_codes.h /^struct kcas_remove_core {$/;" s +kcas_reset_stats include/cas_ioctl_codes.h /^struct kcas_reset_stats {$/;" s +kcas_set_cache_param include/cas_ioctl_codes.h /^struct kcas_set_cache_param {$/;" s +kcas_set_cache_state include/cas_ioctl_codes.h /^struct kcas_set_cache_state {$/;" s +kcas_set_core_param include/cas_ioctl_codes.h /^struct kcas_set_core_param {$/;" s +kcas_start_cache 
include/cas_ioctl_codes.h /^struct kcas_start_cache {$/;" s +kcas_stop_cache include/cas_ioctl_codes.h /^struct kcas_stop_cache {$/;" s +kcas_upgrade include/cas_ioctl_codes.h /^struct kcas_upgrade {$/;" s +key cas_cache/utils/utils_properties.c /^ char *key;$/;" m struct:_cas_property file: +kicked cas_cache/threads.c /^ atomic_t kicked;$/;" m struct:cas_thread_info file: +kmem_cache cas_cache/ocf_env.c /^ struct kmem_cache *kmem_cache;$/;" m struct:_env_allocator typeref:struct:_env_allocator::kmem_cache file: +kobj cas_disk/cas_disk_defs.h /^ struct kobject kobj;$/;" m struct:casdsk_module typeref:struct:casdsk_module::kobject +kobj cas_disk/disk.h /^ struct kobject kobj;$/;" m struct:casdsk_disk typeref:struct:casdsk_disk::kobject +kobj cas_disk/exp_obj.h /^ struct kobject kobj;$/;" m struct:casdsk_exp_obj typeref:struct:casdsk_exp_obj::kobject +len cas_cache/context.h /^ uint32_t len;$/;" m struct:bio_vec_iter +limit cas_cache/utils/utils_rpool.c /^ uint32_t limit;$/;" m struct:cas_reserve_pool file: +line_size include/cas_ioctl_codes.h /^ ocf_cache_line_size_t line_size;$/;" m struct:kcas_start_cache +list cas_cache/context.h /^ struct list_head list;$/;" m struct:blk_data typeref:struct:blk_data::list_head +list cas_cache/utils/utils_gc.c /^ struct llist_head list;$/;" m struct:cas_vfree_item typeref:struct:cas_vfree_item::llist_head file: +list cas_cache/utils/utils_properties.c /^ struct list_head list;$/;" m struct:cas_properties typeref:struct:cas_properties::list_head file: +list cas_cache/utils/utils_rpool.c /^ struct list_head list;$/;" m struct:_cas_reserve_pool_per_cpu typeref:struct:_cas_reserve_pool_per_cpu::list_head file: +list cas_disk/disk.h /^ struct list_head list;$/;" m struct:casdsk_disk typeref:struct:casdsk_disk::list_head +lock cas_cache/utils/utils_rpool.c /^ spinlock_t lock;$/;" m struct:_cas_reserve_pool_per_cpu file: +lock cas_disk/cas_disk_defs.h /^ struct mutex lock;$/;" m struct:casdsk_module typeref:struct:casdsk_module::mutex 
+lock cas_disk/disk.h /^ struct mutex lock;$/;" m struct:casdsk_disk typeref:struct:casdsk_disk::mutex +locked_bd cas_disk/exp_obj.h /^ struct block_device *locked_bd;$/;" m struct:casdsk_exp_obj typeref:struct:casdsk_exp_obj::block_device +make_request_fn cas_disk/cas_disk.h /^ int (*make_request_fn)(struct casdsk_disk *dsk, struct request_queue *q,$/;" m struct:casdsk_exp_obj_ops +map_cas_err_to_generic_code cas_cache/service_ui_ioctl.c /^int map_cas_err_to_generic_code(int cas_error_code)$/;" f +master cas_cache/object/obj_atomic_dev_bottom.c /^ struct cas_atomic_io *master;$/;" m struct:cas_atomic_io typeref:struct:cas_atomic_io::cas_atomic_io file: +master_io_req cas_cache/context.h /^ void *master_io_req;$/;" m struct:blk_data +master_remaining cas_cache/context.h /^ atomic_t master_remaining;$/;" m struct:blk_data +max_count cas_cache/layer_cache_management.c /^ int max_count;$/;" m struct:get_paths_ctx file: +max_writeback_queue_size cas_cache/main.c /^u32 max_writeback_queue_size = 65536;$/;" v +metadata cas_cache/object/obj_atomic_dev_bottom.c /^ unsigned metadata:1;$/;" m struct:cas_atomic_io file: +metadata_layout cas_cache/main.c /^u32 metadata_layout = ocf_metadata_layout_default;$/;" v +metadata_mode cas_cache/object/obj_atomic_dev_bottom.h /^ enum atomic_metadata_mode metadata_mode;$/;" m struct:atomic_dev_params typeref:enum:atomic_dev_params::atomic_metadata_mode +metadata_mode include/cas_ioctl_codes.h /^ int metadata_mode; \/**< selected metadata mode *\/$/;" m struct:kcas_nvme_format +metadata_mode include/cas_ioctl_codes.h /^ uint8_t metadata_mode; \/**< metadata mode (normal\/atomic) *\/$/;" m struct:kcas_cache_info +metadata_mode_optimal include/cas_ioctl_codes.h /^ uint8_t metadata_mode_optimal; \/**< Current metadata mode is optimal *\/$/;" m struct:kcas_start_cache +min_free_ram include/cas_ioctl_codes.h /^ uint64_t min_free_ram; \/**< Minimum free RAM memory for cache metadata *\/$/;" m struct:kcas_start_cache +mk_rq_fn 
cas_disk/exp_obj.h /^ make_request_fn *mk_rq_fn;$/;" m struct:casdsk_exp_obj +mode cas_disk/disk.h /^ atomic_t mode;$/;" m struct:casdsk_disk +mutex cas_cache/ocf_env.h /^ struct mutex mutex;$/;" m struct:__anon1 typeref:struct:__anon1::mutex +n_blobs cas_disk/cas_disk_defs.h /^ size_t n_blobs;$/;" m struct:casdsk_stored_config +name cas_cache/main.c /^ char *name;$/;" m struct:exported_symbol file: +name cas_cache/ocf_env.c /^ char *name;$/;" m struct:_env_allocator file: +name cas_cache/threads.c /^ char name[MAX_THREAD_NAME_SIZE];$/;" m struct:cas_thread_info file: +name cas_cache/utils/utils_rpool.c /^ char *name;$/;" m struct:cas_reserve_pool file: +next_disk_id cas_disk/cas_disk_defs.h /^ uint32_t next_disk_id;$/;" m struct:casdsk_module +next_minor cas_disk/cas_disk_defs.h /^ int next_minor;$/;" m struct:casdsk_module +nsid cas_cache/object/obj_atomic_dev_bottom.h /^ unsigned int nsid;$/;" m struct:atomic_dev_params +nvme_format include/cas_ioctl_codes.h /^ uint8_t nvme_format : 1;$/;" m struct:kcas_capabilites +obj cas_cache/object/obj_atomic_dev_bottom.c /^ ocf_data_obj_t obj;$/;" m struct:cas_atomic_io file: +obj-m cas_cache/Makefile /^obj-m := cas_cache.o$/;" m +obj-m cas_disk/Makefile /^obj-m := cas_disk.o$/;" m +object_type_t cas_cache/cas_cache.h /^enum object_type_t {$/;" g +offset cas_cache/context.h /^ uint32_t offset;$/;" m struct:bio_vec_iter +open_bdev_exclusive cas_disk/cas_disk_defs.h /^static inline struct block_device *open_bdev_exclusive(const char *path,$/;" f +opened_by_bdev cas_cache/object/obj_blk.h /^ uint32_t opened_by_bdev : 1;$/;" m struct:bd_volume +ops cas_disk/exp_obj.h /^ struct casdsk_exp_obj_ops *ops;$/;" m struct:casdsk_exp_obj typeref:struct:casdsk_exp_obj::casdsk_exp_obj_ops +original_io cas_cache/object/obj_atomic_dev_bottom.c /^ struct ocf_io *original_io;$/;" m struct:cas_atomic_write_zero_ctx typeref:struct:cas_atomic_write_zero_ctx::ocf_io file: +owner cas_disk/exp_obj.h /^ struct module *owner;$/;" m 
struct:casdsk_exp_obj typeref:struct:casdsk_exp_obj::module +param_id include/cas_ioctl_codes.h /^ enum kcas_cache_param_id param_id;$/;" m struct:kcas_get_cache_param typeref:enum:kcas_get_cache_param::kcas_cache_param_id +param_id include/cas_ioctl_codes.h /^ enum kcas_cache_param_id param_id;$/;" m struct:kcas_set_cache_param typeref:enum:kcas_set_cache_param::kcas_cache_param_id +param_id include/cas_ioctl_codes.h /^ enum kcas_core_param_id param_id;$/;" m struct:kcas_get_core_param typeref:enum:kcas_get_core_param::kcas_core_param_id +param_id include/cas_ioctl_codes.h /^ enum kcas_core_param_id param_id;$/;" m struct:kcas_set_core_param typeref:enum:kcas_set_core_param::kcas_core_param_id +param_value include/cas_ioctl_codes.h /^ uint32_t param_value;$/;" m struct:kcas_get_cache_param +param_value include/cas_ioctl_codes.h /^ uint32_t param_value;$/;" m struct:kcas_get_core_param +param_value include/cas_ioctl_codes.h /^ uint32_t param_value;$/;" m struct:kcas_set_cache_param +param_value include/cas_ioctl_codes.h /^ uint32_t param_value;$/;" m struct:kcas_set_core_param +path cas_disk/disk.h /^ char *path;$/;" m struct:casdsk_disk +path_name include/cas_ioctl_codes.h /^ char path_name[MAX_STR_LEN]; \/**< path to a device *\/$/;" m struct:kcas_cache_check_device +pending_rqs cas_cache/object/obj_blk.h /^ atomic64_t pending_rqs;$/;" m struct:bd_volume +pending_rqs cas_disk/exp_obj.h /^ atomic_t *pending_rqs;$/;" m struct:casdsk_exp_obj +pending_rqs_cache cas_disk/cas_disk_defs.h /^ struct kmem_cache *pending_rqs_cache;$/;" m struct:casdsk_module typeref:struct:casdsk_module::kmem_cache +position cas_cache/layer_cache_management.c /^ int position;$/;" m struct:get_paths_ctx file: +potential_dirty cas_cache/object/obj_atomic_dev_bottom.c /^ atomic_t potential_dirty;$/;" m struct:cas_atomic_io file: +potentially_dirty cas_cache/object/obj_blk.h /^ atomic_t potentially_dirty;$/;" m struct:bd_volume +prep_rq_fn cas_disk/cas_disk.h /^ int (*prep_rq_fn)(struct 
casdsk_disk *dsk, struct request_queue *q,$/;" m struct:casdsk_exp_obj_ops +prepare_queue cas_disk/cas_disk.h /^ int (*prepare_queue)(struct casdsk_disk *dsk, struct request_queue *q,$/;" m struct:casdsk_exp_obj_ops +pt_io_ctx_cache cas_disk/cas_disk_defs.h /^ struct kmem_cache *pt_io_ctx_cache;$/;" m struct:casdsk_module typeref:struct:casdsk_module::kmem_cache +pt_ios cas_disk/exp_obj.h /^ atomic_t pt_ios;$/;" m struct:casdsk_exp_obj +queue cas_disk/exp_obj.h /^ struct request_queue *queue;$/;" m struct:casdsk_exp_obj typeref:struct:casdsk_exp_obj::request_queue +ref_counter cas_cache/object/obj_blk_utils.h /^ atomic_t ref_counter;$/;" m struct:blkio +req_remaining cas_cache/object/obj_atomic_dev_bottom.c /^ atomic_t req_remaining;$/;" m struct:cas_atomic_io file: +request cas_cache/object/obj_atomic_dev_bottom.c /^ struct request *request;$/;" m struct:cas_atomic_io typeref:struct:cas_atomic_io::request file: +request_fn cas_disk/cas_disk.h /^ void (*request_fn)(struct casdsk_disk *dsk, struct request_queue *q,$/;" m struct:casdsk_exp_obj_ops +restore_callback_t cas_cache/layer_upgrade.c /^typedef int (*restore_callback_t) (struct cas_properties *cache_props);$/;" t file: +rpool cas_cache/ocf_env.c /^ struct cas_reserve_pool *rpool;$/;" m struct:_env_allocator typeref:struct:_env_allocator::cas_reserve_pool file: +rpool_master cas_cache/utils/utils_rpool.c /^ struct cas_reserve_pool *rpool_master;$/;" m struct:_cas_rpool_pre_alloc_info typeref:struct:_cas_rpool_pre_alloc_info::cas_reserve_pool file: +rpool_new cas_cache/utils/utils_rpool.c /^ cas_rpool_new rpool_new;$/;" m struct:_cas_rpool_pre_alloc_info file: +rpools cas_cache/utils/utils_rpool.c /^ struct _cas_reserve_pool_per_cpu *rpools;$/;" m struct:cas_reserve_pool typeref:struct:cas_reserve_pool::_cas_reserve_pool_per_cpu file: +rq_lock cas_disk/exp_obj.h /^ spinlock_t rq_lock;$/;" m struct:casdsk_exp_obj +rq_remaning cas_cache/object/obj_blk_utils.h /^ atomic_t rq_remaning;$/;" m struct:blkio +running 
cas_cache/threads.c /^ bool running;$/;" m struct:cas_thread_info file: +sem cas_cache/ocf_env.h /^ struct rw_semaphore sem;$/;" m struct:__anon2 typeref:struct:__anon2::rw_semaphore +seq_cut_off_mb cas_cache/main.c /^u32 seq_cut_off_mb = 1;$/;" v +set_geometry cas_disk/cas_disk.h /^ int (*set_geometry)(struct casdsk_disk *dsk, void *private);$/;" m struct:casdsk_exp_obj_ops +show cas_disk/sysfs.h /^ ssize_t (*show)(struct kobject *kobj, char *page);$/;" m struct:casdsk_attribute +size cas_cache/context.h /^ uint32_t size;$/;" m struct:blk_data +size cas_cache/object/obj_atomic_dev_bottom.h /^ uint64_t size;$/;" m struct:atomic_dev_params +size cas_disk/cas_disk.h /^ size_t size;$/;" m struct:casdsk_props_conf +start cas_cache/object/obj_atomic_dev_bottom.c /^ uint32_t start;$/;" m struct:cas_atomic_io file: +start_time cas_cache/context.h /^ unsigned long long start_time;$/;" m struct:blk_data +state include/cas_ioctl_codes.h /^ ocf_core_state_t state;$/;" m struct:kcas_core_info +stats include/cas_ioctl_codes.h /^ struct ocf_stats_core stats;$/;" m struct:kcas_core_info typeref:struct:kcas_core_info::ocf_stats_core +stats include/cas_ioctl_codes.h /^ struct ocf_stats_io_class stats;$/;" m struct:kcas_io_class typeref:struct:kcas_io_class::ocf_stats_io_class +std_error cas_cache/service_ui_ioctl.c /^ int std_error;$/;" m struct:__anon4 file: +step_size cas_cache/object/obj_atomic_dev_bottom.c /^ unsigned step_size;$/;" m struct:cas_atomic_write_zero_ctx file: +stop cas_cache/threads.c /^ atomic_t stop;$/;" m struct:cas_thread_info file: +store cas_disk/sysfs.h /^ ssize_t (*store)(struct kobject *kobj, const char *buf, size_t len);$/;" m struct:casdsk_attribute +sub_io cas_cache/object/obj_atomic_dev_bottom.c /^ struct ocf_io *sub_io;$/;" m struct:cas_atomic_write_zero_ctx typeref:struct:cas_atomic_write_zero_ctx::ocf_io file: +sync_compl cas_cache/threads.c /^ struct completion sync_compl;$/;" m struct:cas_thread_info typeref:struct:cas_thread_info::completion 
file: +sync_data cas_cache/threads.c /^ void *sync_data;$/;" m struct:cas_thread_info file: +thread cas_cache/threads.c /^ struct task_struct *thread;$/;" m struct:cas_thread_info typeref:struct:cas_thread_info::task_struct file: +timeval_to_us cas_cache/cas_cache.h /^static inline unsigned long long timeval_to_us(const struct timeval *tv)$/;" f +try_add include/cas_ioctl_codes.h /^ bool try_add; \/**< add core to pool if cache isn't present *\/$/;" m struct:kcas_insert_core +type cas_cache/utils/utils_properties.c /^ uint8_t type;$/;" m struct:_cas_property file: +unaligned_io cas_cache/main.c /^u32 unaligned_io = 1;$/;" v +update_path include/cas_ioctl_codes.h /^ bool update_path; \/**< provide alternative path for core device *\/$/;" m struct:kcas_insert_core +upgrade_in_progress cas_disk/main.c /^static int upgrade_in_progress = 0;$/;" v file: +use_io_scheduler cas_cache/main.c /^u32 use_io_scheduler = 1;$/;" v +value cas_cache/utils/utils_properties.c /^ void *value;$/;" m union:_cas_property::__anon3 file: +value_sint cas_cache/utils/utils_properties.c /^ int64_t value_sint;$/;" m union:_cas_property::__anon3 file: +value_uint cas_cache/utils/utils_properties.c /^ uint64_t value_uint;$/;" m union:_cas_property::__anon3 file: +vec cas_cache/context.h /^ struct bio_vec *vec;$/;" m struct:bio_vec_iter typeref:struct:bio_vec_iter::bio_vec +vec cas_cache/context.h /^ struct bio_vec vec[];$/;" m struct:blk_data typeref:struct:blk_data::bio_vec +vec_size cas_cache/context.h /^ uint32_t vec_size;$/;" m struct:bio_vec_iter +workqueue cas_cache/object/obj_blk.h /^ struct workqueue_struct *workqueue;$/;" m struct:bd_volume typeref:struct:bd_volume::workqueue_struct +wq cas_cache/ocf_env.h /^ wait_queue_head_t wq;$/;" m struct:__anon2 +wq cas_cache/threads.c /^ wait_queue_head_t wq;$/;" m struct:cas_thread_info file: +writeback_queue_unblock_size cas_cache/main.c /^u32 writeback_queue_unblock_size = 60000;$/;" v +ws cas_cache/utils/utils_gc.c /^ struct work_struct 
ws;$/;" m struct:cas_vfree_item typeref:struct:cas_vfree_item::work_struct file: +ws cas_cache/utils/utils_rpool.c /^ struct work_struct ws;$/;" m struct:_cas_rpool_pre_alloc_info typeref:struct:_cas_rpool_pre_alloc_info::work_struct file: diff --git a/ocf b/ocf new file mode 160000 index 000000000..40f1e9e0d --- /dev/null +++ b/ocf @@ -0,0 +1 @@ +Subproject commit 40f1e9e0d3cd06e01dce4a0117c19ff107f74370 diff --git a/utils/60-persistent-storage-cas-load.rules b/utils/60-persistent-storage-cas-load.rules new file mode 100644 index 000000000..6933cfa3b --- /dev/null +++ b/utils/60-persistent-storage-cas-load.rules @@ -0,0 +1,11 @@ +ACTION!="add", GOTO="cas_loader_end" +SUBSYSTEM!="block", GOTO="cas_loader_end" + +RUN+="/lib/opencas/open-cas-loader /dev/$name" + +# Work around systemd<->udev interaction, make sure filesystems with labels on +# cas are mounted properly +KERNEL!="cas*", GOTO="cas_loader_end" +IMPORT{builtin}="blkid" +ENV{ID_FS_USAGE}=="filesystem|other", ENV{ID_FS_LABEL_ENC}=="?*", RUN+="/lib/opencas/open-cas-mount-utility $env{ID_FS_LABEL_ENC}" +LABEL="cas_loader_end" diff --git a/utils/60-persistent-storage-cas.rules b/utils/60-persistent-storage-cas.rules new file mode 100644 index 000000000..9739cb68d --- /dev/null +++ b/utils/60-persistent-storage-cas.rules @@ -0,0 +1,38 @@ +ACTION=="remove", GOTO="cas_end" + +SUBSYSTEM=="block", KERNEL=="cas*", OPTIONS+="watch" + +SUBSYSTEM!="block", GOTO="cas_end" +KERNEL!="cas*", GOTO="cas_end" + +# ignore partitions that span the entire disk +TEST=="whole_disk", GOTO="cas_end" + +# for partitions import parent information +ENV{DEVTYPE}=="partition", IMPORT{parent}="ID_*" + +# by-path +ENV{DEVTYPE}=="disk", DEVPATH!="*/virtual/*", IMPORT{builtin}="path_id" +ENV{DEVTYPE}=="disk", ENV{ID_PATH}=="?*", SYMLINK+="disk/by-path/$env{ID_PATH}" +ENV{DEVTYPE}=="partition", ENV{ID_PATH}=="?*", SYMLINK+="disk/by-path/$env{ID_PATH}-part%n" + +# probe filesystem metadata of disks +KERNEL=="cas*", IMPORT{builtin}="blkid" + +# 
by-label/by-uuid links (filesystem metadata) +ENV{ID_FS_USAGE}=="filesystem|other|crypto", ENV{ID_FS_UUID_ENC}=="?*", SYMLINK+="disk/by-uuid/$env{ID_FS_UUID_ENC}", OPTIONS+="link_priority=999" +ENV{ID_FS_USAGE}=="filesystem|other", ENV{ID_FS_LABEL_ENC}=="?*", SYMLINK+="disk/by-label/$env{ID_FS_LABEL_ENC}", OPTIONS+="link_priority=999" + +# by-id (World Wide Name) +ENV{DEVTYPE}=="disk", ENV{ID_WWN_WITH_EXTENSION}=="?*", SYMLINK+="disk/by-id/wwn-$env{ID_WWN_WITH_EXTENSION}" +ENV{DEVTYPE}=="partition", ENV{ID_WWN_WITH_EXTENSION}=="?*", SYMLINK+="disk/by-id/wwn-$env{ID_WWN_WITH_EXTENSION}-part%n" + +# by-partlabel/by-partuuid links (partition metadata) +ENV{ID_PART_ENTRY_SCHEME}=="gpt", ENV{ID_PART_ENTRY_UUID}=="?*", SYMLINK+="disk/by-partuuid/$env{ID_PART_ENTRY_UUID}", OPTIONS+="link_priority=999" +ENV{ID_PART_ENTRY_SCHEME}=="gpt", ENV{ID_PART_ENTRY_NAME}=="?*", SYMLINK+="disk/by-partlabel/$env{ID_PART_ENTRY_NAME}", OPTIONS+="link_priority=999" + +# add symlink to GPT root disk +ENV{ID_PART_ENTRY_SCHEME}=="gpt", ENV{ID_PART_GPT_AUTO_ROOT}=="1", SYMLINK+="gpt-auto-root", OPTIONS+="link_priority=999" + +LABEL="cas_end" + diff --git a/utils/Makefile b/utils/Makefile new file mode 100644 index 000000000..f55eb30f1 --- /dev/null +++ b/utils/Makefile @@ -0,0 +1,71 @@ +# +# Copyright(c) 2012-2019 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# + +CASCTL_DIR = /lib/opencas +UDEVRULES_DIR = /lib/udev/rules.d +UDEV:=$(shell which udevadm) +SYSTEMCTL := $(shell which systemctl) + +ifeq (, $(shell which systemctl)) +define cas_install + install -m 755 open-cas-shutdown /etc/init.d/open-cas-shutdown + /sbin/chkconfig open-cas-shutdown on; service open-cas-shutdown start +endef +else +ifneq "$(wildcard /usr/lib/systemd/system)" "" + SYSTEMD_DIR=/usr/lib/systemd/system +else + SYSTEMD_DIR=/lib/systemd/system +endif +define cas_install + install -m 644 open-cas-shutdown.service $(SYSTEMD_DIR)/open-cas-shutdown.service + install -m 755 -d 
$(SYSTEMD_DIR)/../system-shutdown + install -m 755 open-cas.shutdown $(SYSTEMD_DIR)/../system-shutdown/open-cas.shutdown + $(SYSTEMCTL) daemon-reload + $(SYSTEMCTL) -q enable open-cas-shutdown +endef +endif + +# Just a placeholder when running make from parent dir without install/uninstall arg +default: ; + +install: + @echo "Installing Open-CAS utils" + + @install -m 755 -d $(CASCTL_DIR) + @install -m 644 opencas.py $(CASCTL_DIR)/opencas.py + @install -m 755 casctl $(CASCTL_DIR)/casctl + @install -m 755 open-cas-loader $(CASCTL_DIR)/open-cas-loader + @install -m 755 open-cas-mount-utility $(CASCTL_DIR)/open-cas-mount-utility + + @ln -fs $(CASCTL_DIR)/casctl /sbin/casctl + + @install -m 644 60-persistent-storage-cas-load.rules $(UDEVRULES_DIR)/60-persistent-storage-cas-load.rules + @install -m 644 60-persistent-storage-cas.rules $(UDEVRULES_DIR)/60-persistent-storage-cas.rules + + @install -m 755 -d /usr/share/doc/opencas + + @$(UDEV) control --reload-rules + + @install -m 644 casctl.8 /usr/share/man/man8/casctl.8 + + $(cas_install) + +uninstall: + @rm $(CASCTL_DIR)/opencas.py + @rm $(CASCTL_DIR)/casctl + @rm $(CASCTL_DIR)/open-cas-loader + @rm $(CASCTL_DIR)/open-cas-mount-utility + @rm -rf $(CASCTL_DIR) + + @rm /sbin/casctl + + @rm /usr/share/man/man8/casctl.8 + + @rm /lib/udev/rules.d/60-persistent-storage-cas-load.rules + @rm /lib/udev/rules.d/60-persistent-storage-cas.rules + + +.PHONY: install uninstall clean distclean diff --git a/utils/casadm.8 b/utils/casadm.8 new file mode 100644 index 000000000..36637d381 --- /dev/null +++ b/utils/casadm.8 @@ -0,0 +1,565 @@ +.TH casadm 8 __CAS_DATE__ v__CAS_VERSION__ +.SH NAME +casadm \- create, and manage Open CAS instances + +.SH SYNOPSIS + +\fBcasadm\fR [options...] + +.SH COPYRIGHT +Copyright(c) 2012-2019 by the Intel Corporation. + +.SH DESCRIPTION +Open Cache Acceleration Software (CAS) accelerates Linux applications by caching +active (hot) data to a local flash device inside servers. 
Open CAS implements +caching at the server level, utilizing local high-performance flash media as +the cache drive media inside the application server as close as possible to +the CPU, thus reducing storage latency as much as possible. +.PP +Open Cache Acceleration Software installs into the GNU/Linux operating system itself, +as a kernel module. The nature of the integration provides a cache solution that is +transparent to users and applications, and your existing storage infrastructure. No +storage migration effort or application changes are required. +.PP +\fBCache device\fR is a faster drive (e.g. SSD-type) used for speeding-up core device. +.br +\fBCore device\fR is a slower drive (e.g. HDD-type) that will be accelerated by Open CAS. + +.SH MODES +Open CAS caching software has several modes of operation: +.TP +.B Write-Through (wt) +Write-Through is a basic caching mode where writes are done synchronously to +the cache device and to the core device. Write-Through cache, which is also known +as Read Cache, mainly improves performance of read IO operations. + +.TP +.B Write-Back (wb) +In Write-Back mode writes are initially written to the cache device only. Cached +write operations that are not synchronized with core device are marked as dirty. +The procedure of writing dirty data from cache device to core device is known as +cleaning. Cleaning may be required if cache is full and eviction (replacement) +policy needs to remove stale data to make space for incoming blocks. Open CAS +provides mechanism which automatically cleans dirty data in background. This is +cleaning (flushing) thread. User can also invoke manual cleaning procedure (see +-E, --flush-cache and -F --flush-core options). Write-Back cache, also known as +Write Cache, improves performance of both read and write IO operations. + +.TP +.B Write-Around (wa) +In Write-Around mode write operations are not cached. 
This means that write to +block that does not exist in cache is written directly to the core device, +bypassing the cache. If write operation is issued to the block which is already +in cache (because of previous read operation) then write is send to the core device +and cache block is updated in the cache device. Write-Around cache improves performance +of workloads where write operation is done rarely and no further read accesses +to that data are performed, so there is no point in caching it. + +.TP +.B Pass-Through (pt) +In Pass-Through mode all read and write operations are not cached and sent directly +to the core device. Pass-Through mode may be used in case if user doesn't want to +cache any workload, for example in case if there are some maintenance operations +causing cache pollution. + +.SH COMMANDS +.TP +.B -S, --start-cache +Start cache instance. + +.TP +.B -T, --stop-cache +Stop cache instance. + +.TP +.B -X, --set-param +Set runtime parameter for cache/core instance. + +.TP +.B -G, --set-param +Get runtime parameter for cache/core instance. + +.TP +.B -Q, --set-cache-mode +Switch caching mode of cache instance. + +.TP +.B -A, --add-core +Add core device to cache instance. + +.TP +.B -R, --remove-core +Remove core device from cache instance. + +.TP +.B " "--remove-detached +Remove core device from core pool. + +.TP +.B -L, --list-caches +List all cache instances and core devices. + +.TP +.B -P, --stats +Print statistics of cache instance. + +.TP +.B -Z, --reset-counters +Reset statistics of given cache/core instance. + +.TP +.B -F, --flush-cache +Flush all dirty data from the caching device to core devices. + +.TP +.B -E, --flush-core +Flush dirty data of a given core from the caching device to this core device. + +.TP +.B -C, --io-class {--load-config|--list} +Manage IO classes. +.br + + 1. \fB-C, --load-config\fR - load default configuration of IO classes. + \fBNOTE:\fR See /etc/opencas for example configuration file. + + 2. 
\fB-L, --list\fR - print current IO class configuration. Allowed output formats: table or CSV. + +.TP +.B -N, --nvme +Manage NVMe device. +.br + + 1. \fB-F, --format\fR {normal|atomic} - format NVMe device to one of supported modes. + \fBNOTE:\fR After formatting NVMe device platform reboot is required. + +.br +Defines cache metadata mode. +In normal mode NVMe namespace uses LBA of size 512 bytes. Cache data and metadata +are written to disk in separate requests in the same way that it happens with +standard SSD device. +Atomic mode exploits extended NVMe metadata features. In this mode namespace +uses 520 bytes LBA allowing to write cache data and metadata in a single +request (atomically). + +.TP +.B -H, --help +Print help. + +.TP +.B -V, --version +Print Open CAS product version. + +.SH OPTIONS +List of available options depends on current context of invocation. For each +command there is a different list of available options: + +.BR + +.SH Options that are valid with --start-cache (-S) are: + +.TP +.B -d, --cache-device +Path to caching device to be used e.g. SSD device (/dev/sdb). + +.TP +.B -i, --cache-id +Unique identifier of cache (if not provided the first available will be used) <1-16384>. + +.TP +.B -l, --load +If metadata exists on a device and this parameter is used, cache will be started based on information from metadata. +If this parameter is not used, cache will be started with full initialization of new metadata. +This option should be used if dirty data were not flushed on exit (if the cache was stopped with the -n, --no-data-flush option). + +\fBCAUTION:\fR +.br +\fB*\fR If the data between the cache device and core device is not in sync (e.g. changes between cache stop and load operations), starting +cache with load option may cause data mismatch. + +.TP +.B -f, --force +Force to start a cache. By default cache will not be started if utility detects file system on cache device. 
+This parameter ignores this situations, and starts a cache instance. + +.TP +.B -c, --cache-mode {wt|wb|wa|pt} +Cache mode to be used for a cache instance. + +Available modes are: +.br +1. \fBwt - Write-Through (default)\fR. +.br +2. \fBwb - Write-Back\fR. +.br +3. \fBwa - Write-Around\fR. +.br +4. \fBpt - Pass-Through\fR. + +.TP +.B -x, --cache-line-size +Set cache line size for given cache instance, expressed in KiB. This +can't be reconfigured runtime. Allowed values: {4,8,16,32,64} +(default: 4) + +.SH Options that are valid with --stop-cache (-T) are: +.TP +.B -i, --cache-id +Identifier of cache instance <1-16384>. + +.TP +.B -n, --no-data-flush +Do not flush dirty data on exit (may be \fBDANGEROUS\fR). +If this option was used, the cache should be restarted with the -l, --load option. +.br +\fBNOTE:\fR If dirty data were not flushed, the contents of a core device +MUST NOT be changed before restarting the cache. Otherwise there is +a data mismatch risk. + +.SH Options that are valid with --set-param (-X) are: + +.TP +.B -n, --name +Name of parameters namespace. + +Available namespaces are: +.br +\fBseq-cutoff\fR - Sequential cutoff parameters. +\fBcleaning\fR - Cleaning policy parameters. +\fBcleaning-alru\fR - Cleaning policy ALRU parameters. +\fBcleaning-acp\fR - Cleaning policy ACP parameters. + +.SH Options that are valid with --set-param (-X) --name (-n) seq-cutoff are: + +.TP +.B -i, --cache-id +Identifier of cache instance <1-16384>. + +.TP +.B -j, --core-id +Identifier of core instance <0-4095> within given cache instance. If this option +is not specified, parameter is set to all cores within given cache instance. + +.TP +.B -t, --seq-threshold +Amount of sequential data in KiB after which request is handled in pass-through mode. + +.TP +.B -p, --seq-policy {always|full|never} +Sequential cutoff policy to be used with a given core instance(s). 
+ +.SH Options that are valid with --set-param (-X) --name (-n) cleaning are: + +.TP +.B -i, --cache-id +Identifier of cache instance <1-16384>. + +.TP +.B -p, --policy {nop|alru|acp} +Cleaning policy type to be used with a given cache instance. + +Available policies: +.br +1. \fBnop\fR. No Operation (no periodical cleaning, clean on eviction only). +.br +2. \fBalru\fR. Approximately Least Recently Used (default). +.br +3. \fBacp\fR. Aggressive Cleaning Policy. + +.SH Options that are valid with --set-param (-X) --name (-n) cleaning-alru are: + +.TP +.B -i, --cache-id +Identifier of cache instance <1-16384>. + +.TP +.B -w, --wake-up +Period of time between awakenings of flushing thread [s] (default: 20 s). + +.TP +.B -s, --staleness-time +Time that has to pass from the last write operation before a dirty cache block can be scheduled to be flushed [s] (default: 120 s). + +.TP +.B -b, --flush-max-buffers +Number of dirty cache blocks to be flushed in one cleaning cycle (default: 100). + +.TP +.B -t, --activity-threshold +Cache idle time before flushing thread can start [ms] (default: 10000 ms). + +.SH Options that are valid with --set-param (-X) --name (-n) cleaning-acp are: + +.TP +.B -i, --cache-id +Identifier of cache instance <1-16384>. + +.TP +.B -w, --wake-up +Period of time between awakenings of flushing thread [ms] (default: 10 ms). + +.TP +.B -b, --flush-max-buffers +Number of dirty cache blocks to be flushed in one cleaning cycle (default: 128). + +.SH Options that are valid with --get-param (-G) are: + +.TP +.B -n, --name +Name of parameters namespace. + +Available namespaces are: +.br +\fBseq-cutoff\fR - Sequential cutoff parameters. +\fBcleaning\fR - Cleaning policy parameters. +\fBcleaning-alru\fR - Cleaning policy ALRU parameters. +\fBcleaning-acp\fR - Cleaning policy ACP parameters. + +.SH Options that are valid with --get-param (-G) --name (-n) seq-cutoff are: + +.TP +.B -i, --cache-id +Identifier of cache instance <1-16384>. 
+ +.TP +.B -j, --core-id +Identifier of core instance <0-4095> within given cache instance. + +.TP +.B -o, --output-format {table|csv} +Defines output format for parameter list. It can be either \fBtable\fR (default) or \fBcsv\fR. + +.SH Options that are valid with --get-param (-G) --name (-n) cleaning are: + +.TP +.B -i, --cache-id +Identifier of cache instance <1-16384>. + +.TP +.B -o, --output-format {table|csv} +Defines output format for parameter list. It can be either \fBtable\fR (default) or \fBcsv\fR. + +.SH Options that are valid with --get-param (-G) --name (-n) cleaning-alru are: + +.TP +.B -i, --cache-id +Identifier of cache instance <1-16384>. + +.TP +.B -o, --output-format {table|csv} +Defines output format for parameter list. It can be either \fBtable\fR (default) or \fBcsv\fR. + +.SH Options that are valid with --get-param (-G) --name (-n) cleaning-acp are: + +.TP +.B -i, --cache-id +Identifier of cache instance <1-16384>. + +.TP +.B -o, --output-format {table|csv} +Defines output format for parameter list. It can be either \fBtable\fR (default) or \fBcsv\fR. + +.SH Options that are valid with --set-cache-mode (-Q) are: +.TP +.B -c, --cache-mode {wt|wb|wa|pt} +Cache mode to be used with a given cache instance. + +Available modes: +.br +1. \fBwt - Write-Through\fR. +.br +2. \fBwb - Write-Back\fR. +.br +3. \fBwa - Write-Around\fR. +.br +4. \fBpt - Pass-Through\fR. + +.TP +.B -i, --cache-id +Identifier of cache instance <1-16384>. + +.TP +.B -f, --flush-cache {yes|no} +Flush all cache dirty data before switching to different mode. Option is required +when switching from Write-Back mode. + +.SH Options that are valid with --add-core (-A) are: +.TP +.B -i, --cache-id +Identifier of cache instance <1-16384>. + +.TP +.B -d, --core-device +Path to core device e.g. HDD device. + +.TP +.B -j, --core-id +Identifier of core instance <0-4095> within given cache instance for new core to be created. This +parameter is optional. 
If it is not supplied, first available core id within cache instance will +be used for new core. + +.SH Options that are valid with --remove-core (-R) are: +.TP +.B -i, --cache-id +Identifier of cache instance <1-16384>. + +.TP +.B -j, --core-id +Identifier of core instance <0-4095> within given cache instance. + +.TP +.B -f, --force +Force remove inactive core. + +.SH Options that are valid with --remove-detached are: +.TP +.B -d, --device +Path to core device to be removed from core pool. + +.SH Options that are valid with --list-caches (-L) are: +.TP +.B -o, --output-format {table|csv} +Defines output format for list of all cache instances and core devices. It can be either \fBtable\fR (default) or \fBcsv\fR. + +.SH Options that are valid with --stats (-P) are: +.TP +.B -i, --cache-id +Identifier of cache instance <1-16384>. + +.TP +.B -j, --core-id +Identifier of core instance <0-4095> within given cache instance. If this option is +not given, aggregate statistics for whole cache instance are printed instead. + +.TP +.B -d, --io-class-id +Identifier of IO class <0-33>. + +.TP +.B -f, --filter +Defines filters to be applied. This is comma separated (no +white-spaces allowed) list from following set of available: + +.br +1. \fBconf\fR - provides information on configuration. +.br +2. \fBusage\fR - occupancy, free, clean and dirty statistics are printed. +.br +3. \fBreq\fR - IO request level statistics are printed. +.br +4. \fBblk\fR - block level statistics are printed. +.br +5. \fBerr\fR - error statistics are printed. +.br +6. \fBall\fR - all of the above. +.br + +Default for --filter option is \fBall\fR. + +.TP +.B -o --output-format {table|csv} +Defines output format for statistics. It can be either \fBtable\fR +(default) or \fBcsv\fR. + +.SH Options that are valid with --reset-counters (-Z) are: +.TP +.B -i, --cache-id +Identifier of cache instance <1-16384>. + +.TP +.B -j, --core-id +Identifier of core instance <0-4095> within given cache instance. 
If this option +is not specified, statistics are reset for all cores within given cache instance. + + +.SH Options that are valid with --flush-cache (-F) are: + +.TP +.B -i, --cache-id +Identifier of cache instance <1-16384>. + +.SH Options that are valid with --flush-core (-E) are: +.TP +.B -i, --cache-id +Identifier of cache instance <1-16384>. + +.TP +.B -j, --core-id +Identifier of core instance <0-4095> within given cache instance. + +.SH Options that are valid with --io-class --load-config (-C -C) are: +.TP +.B -i, --cache-id +Identifier of cache instance <1-16384>. + +.TP +.B -f, --file +Configuration file containing IO class definition. + +.SH Options that are valid with --io-class --list (-C -L) are: +.TP +.B -i, --cache-id +Identifier of cache instance <1-16384>. + +.TP +.B -o --output-format {table|csv} +Defines output format for printed IO class configuration. It can be either +\fBtable\fR (default) or \fBcsv\fR. + +.SH Options that are valid with --nvme --format (-N -F) are: + +.TP +.B -d, --device +Path to NVMe device to be formatted (e.g. /dev/nvme0). + +.TP +.B -f, --force +Force to format NVMe device. By default device will not be formatted if utility +detects on the device file system or presence of dirty data after cache dirty +shutdown. This parameter formats NVMe namespace regardless to this situations. + + +.SH Command --help (-H) does not accept any options. +.BR + +.SH Options that are valid with --version (-V) are: + +.TP +.B -o --output-format {table|csv} +Defines output format. It can be either \fBtable\fR (default) or \fBcsv\fR. + + +.SH ENVIRONMENT VARIABLES +Following environment variables affect behavior of casadm administrative utility: +.TP +.B LANG +If en_US.utf-8, en_US.UTF-8 is configured, tables displayed by -L/--list-caches, +-P/--stats and -C -L/--io-class --list are formatted using Unicode table drawing +characters. Otherwise only '+', '|' and '-' are used. 
+ +.TP +.B TERM +If xterm or screen is used, colors are used for formatting tables. Otherwise, +color is not used. Additionally colors are NOT used if standard output of +casadm isn't a TTY (i.e. it's output is displayed via less(1), watch(1) or +redirected to a file) + +.TP +.B CASADM_COLORS +If this variable is set, colors are used even if TERM isn't set to xterm/screen +or when output is redirected to another program. It's convenient to do: +CASADM_COLORS=true screen 'casadm -P -i 1' + +.TP +.B CASADM_NO_LINE_BREAK +If CASADM_NO_LINE_BREAK is set, casadm won't break lines for tables displayed +by -L/--list-caches, -P/--stats and -C -L/--io-class --list + + +.SH REPORTING BUGS +Patches and issues may be submitted to the official repository at +\fBhttps://open-cas.github.io\fR + +.SH SEE ALSO +.TP +casctl(8), opencas.conf(5) diff --git a/utils/casctl b/utils/casctl new file mode 100755 index 000000000..89fb7bd61 --- /dev/null +++ b/utils/casctl @@ -0,0 +1,142 @@ +#!/usr/bin/env python2 +# +# Copyright(c) 2012-2019 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# + +from __future__ import print_function +import argparse +import sys +import re +import opencas + +def eprint(*args, **kwargs): + print(*args, file=sys.stderr, **kwargs) + +# Start - load all the caches and add cores + +def start(): + try: + config = opencas.cas_config.from_file('/etc/opencas/opencas.conf', + allow_incomplete=True) + except Exception as e: + eprint(e) + eprint('Unable to parse config file.') + exit(1) + + for cache in config.caches.values(): + try: + opencas.start_cache(cache, True) + except opencas.casadm.CasadmError as e: + eprint('Unable to load cache {0} ({1}). Reason:\n{2}' + .format(cache.cache_id, cache.device, e.result.stderr)) + +# Initial cache start + +def add_core_recursive(core, config): + with_error = False + if core.added: + return with_error + if core.marked: + eprint('Unable to add core {0} to cache {1}. Reason:\nRecursive core configuration!' 
+ .format(core.device, core.cache_id)) + exit(3) + core.marked = True + match = re.match('/dev/cas(\d)-(\d).*', core.device) + if match: + cache_id,core_id = match.groups() + with_error = add_core_recursive(config.caches[int(cache_id)].cores[int(core_id)], config) + try: + opencas.add_core(core, False) + core.added = True + except opencas.casadm.CasadmError as e: + eprint('Unable to add core {0} to cache {1}. Reason:\n{2}' + .format(core.device, core.cache_id, e.result.stderr)) + with_error = True + return with_error + +def init(force): + exit_code = 0 + try: + config = opencas.cas_config.from_file('/etc/opencas/opencas.conf') + except Exception as e: + eprint(e) + eprint('Unable to parse config file.') + exit(1) + + if not force: + for cache in config.caches.values(): + try: + status = opencas.check_cache_device(cache.device) + if status['Is cache'] == 'yes' and status['Cache dirty'] == 'yes': + eprint('Unable to perform initial configuration.\n' \ + 'One of cache devices contains dirty data.') + exit(1) + except opencas.casadm.CasadmError as e: + eprint('Unable to check status of device {0}. Reason:\n{1}' + .format(cache.device, e.result.stderr)) + exit(e.result.exit_code) + + for cache in config.caches.values(): + try: + opencas.start_cache(cache, False, force) + except opencas.casadm.CasadmError as e: + eprint('Unable to start cache {0} ({1}). Reason:\n{2}' + .format(cache.cache_id, cache.device, e.result.stderr)) + exit_code = 2 + try: + opencas.configure_cache(cache) + except opencas.casadm.CasadmError as e: + eprint('Unable to configure cache {0} ({1}). 
Reason:\n{2}' + .format(cache.cache_id, cache.device, e.result.stderr)) + exit_code = 2 + + for core in config.cores: + core.added = False + core.marked = False + for core in config.cores: + with_error = add_core_recursive(core, config) + if with_error: + exit_code = 2 + + exit(exit_code) + +# Stop - detach cores and stop caches +def stop(flush): + try: + opencas.stop(flush) + except Exception as e: + eprint(e) + +# Command line arguments parsing + +class cas: + def __init__(self): + parser = argparse.ArgumentParser(prog = 'cas') + subparsers = parser.add_subparsers(title = 'actions') + + parser_init = subparsers.add_parser('init', help = 'Setup initial configuration') + parser_init.set_defaults(command='init') + parser_init.add_argument ('--force', action='store_true', help = 'Force cache start') + + parser_start = subparsers.add_parser('start', help = 'Start cache configuration') + parser_start.set_defaults(command='start') + + parser_stop = subparsers.add_parser('stop', help = 'Stop cache configuration') + parser_stop.set_defaults(command='stop') + parser_stop.add_argument ('--flush', action='store_true', help = 'Flush data before stopping') + + args = parser.parse_args(sys.argv[1:]) + getattr(self, 'command_' + args.command)(args) + + def command_init(self, args): + init(args.force) + + def command_start(self, args): + start() + + def command_stop(self, args): + stop(args.flush) + +if __name__ == '__main__': + cas() diff --git a/utils/casctl.8 b/utils/casctl.8 new file mode 100644 index 000000000..97d8d2fd0 --- /dev/null +++ b/utils/casctl.8 @@ -0,0 +1,63 @@ +.TH casctl.8 __CAS_DATE__ v__CAS_VERSION__ +.SH NAME +casctl \- whole-configuration-manager for Open CAS. + + +.SH SYNOPSIS + +\fBcasctl\fR [options...] + +.SH COPYRIGHT +Copyright(c) 2012-2019 by the Intel Corporation. + +.SH COMMANDS +.TP +.B start +Start all cache instances. + +.TP +.B stop +Stop all cache instances. + +.TP +.B init +Initial configuration of caches and core devices. 
+ +.br +.B CAUTION +.br +May be used if there is no metadata on cache device or if metatata exists, then only if it's all clean. + +.TP +.B -h, --help + + +.SH OPTIONS + +.TP +.SH Command start does not accept any options. + +.TP +.SH Options that are valid with stop are: + +.TP +.B --flush +Flush data before stopping. + +.TP +.SH Options that are valid with init are: + +.TP +.B --force +Force cache start even if cache device contains partitions or metadata from previously running cache instances. + +.TP +.SH Command --help (-h) does not accept any options. + +.SH REPORTING BUGS +Patches and issues may be submitted to the official repository at +\fBhttps://open-cas.github.io\fR + +.SH SEE ALSO +.TP +casadm(8), opencas.conf(5) diff --git a/utils/ext3-config.csv b/utils/ext3-config.csv new file mode 100644 index 000000000..070c9f843 --- /dev/null +++ b/utils/ext3-config.csv @@ -0,0 +1,24 @@ +IO class id,IO class name,Eviction priority,Allocation +0,Unclassified,22,1 +1,Superblock,0,1 +2,GroupDesc,1,1 +3,BlockBitmap,2,1 +4,InodeBitmap,3,1 +5,Inode,4,1 +6,IndirectBlk,5,1 +7,Directory,6,1 +8,Journal,7,1 +10,Xatrr,8,1 +11,<=4KiB,9,1 +12,<=16KiB,10,1 +13,<=64KiB,11,1 +14,<=256KiB,12,1 +15,<=1MiB,13,1 +16,<=4MiB,14,1 +17,<=16MiB,15,1 +18,<=64MiB,16,1 +19,<=256MiB,17,1 +20,<=1GiB,18,1 +21,>1GiB,19,1 +22,O_DIRECT,20,1 +23,Misc,21,1 diff --git a/utils/ext4-config.csv b/utils/ext4-config.csv new file mode 100644 index 000000000..1d04c9f22 --- /dev/null +++ b/utils/ext4-config.csv @@ -0,0 +1,25 @@ +IO class id,IO class name,Eviction priority,Allocation +0,Unclassified,23,1 +1,Superblock,0,1 +2,GroupDesc,1,1 +3,BlockBitmap,2,1 +4,InodeBitmap,3,1 +5,Inode,4,1 +6,IndirectBlk,5,1 +7,Directory,6,1 +8,Journal,7,1 +9,Extent,8,1 +10,Xatrr,9,1 +11,<=4KiB,10,1 +12,<=16KiB,11,1 +13,<=64KiB,12,1 +14,<=256KiB,13,1 +15,<=1MiB,14,1 +16,<=4MiB,15,1 +17,<=16MiB,16,1 +18,<=64MiB,17,1 +19,<=256MiB,18,1 +20,<=1GiB,19,1 +21,>1GiB,20,1 +22,O_DIRECT,21,1 +23,Misc,22,1 diff --git 
a/utils/ioclass-config.csv b/utils/ioclass-config.csv new file mode 100644 index 000000000..bdcd81a29 --- /dev/null +++ b/utils/ioclass-config.csv @@ -0,0 +1,15 @@ +IO class id,IO class name,Eviction priority,Allocation +0,unclassified,22,1 +1,metadata&done,0,1 +11,file_size:le:4096&done,9,1 +12,file_size:le:16384&done,10,1 +13,file_size:le:65536&done,11,1 +14,file_size:le:262144&done,12,1 +15,file_size:le:1048576&done,13,1 +16,file_size:le:4194304&done,14,1 +17,file_size:le:16777216&done,15,1 +18,file_size:le:67108864&done,16,1 +19,file_size:le:268435456&done,17,1 +20,file_size:le:1073741824&done,18,1 +21,file_size:gt:1073741824&done,19,1 +22,direct&done,20,1 diff --git a/utils/open-cas-loader b/utils/open-cas-loader new file mode 100755 index 000000000..e1ffe31f1 --- /dev/null +++ b/utils/open-cas-loader @@ -0,0 +1,56 @@ +#!/usr/bin/env python2 +# +# Copyright(c) 2012-2019 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# + +from __future__ import print_function +import subprocess +import time +import opencas +import sys +import os +import syslog as sl + +def wait_for_cas_ctrl(): + for i in range(30): # timeout 30s + if os.path.exists('/dev/cas_ctrl'): + return + time.sleep(1) + +try: + subprocess.call(['/sbin/modprobe', 'cas_cache']) +except: + sl.syslog(sl.LOG_ERR, 'Unable to probe cas_cache module') + exit(1) + +try: + config = opencas.cas_config.from_file('/etc/opencas/opencas.conf', + allow_incomplete=True) +except Exception as e: + sl.syslog(sl.LOG_ERR, + 'Unable to load opencas config. Reason: {0}'.format(str(e))) + exit(1) + +for cache in config.caches.values(): + if sys.argv[1] == os.path.realpath(cache.device): + try: + wait_for_cas_ctrl() + opencas.start_cache(cache, True) + except opencas.casadm.CasadmError as e: + sl.syslog(sl.LOG_WARNING, + 'Unable to load cache {0} ({1}). 
Reason: {2}' + .format(cache.cache_id, cache.device, e.result.stderr)) + exit(e.result.exit_code) + exit(0) + for core in cache.cores.values(): + if sys.argv[1] == os.path.realpath(core.device): + try: + wait_for_cas_ctrl() + opencas.add_core(core, True) + except opencas.casadm.CasadmError as e: + sl.syslog(sl.LOG_WARNING, + 'Unable to attach core {0} from cache {1}. Reason: {2}' + .format(core.device, cache.cache_id, e.result.stderr)) + exit(e.result.exit_code) + exit(0) diff --git a/utils/open-cas-mount-utility b/utils/open-cas-mount-utility new file mode 100755 index 000000000..687197cab --- /dev/null +++ b/utils/open-cas-mount-utility @@ -0,0 +1,26 @@ +#!/bin/bash +# +# Copyright(c) 2012-2019 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# + +# Find all mount units, cut to remove list-units decorations +logger "Open CAS Mount Utility checking for $1 FS label..." +MOUNT_UNITS=`systemctl --plain list-units | grep \.mount | grep -v '\-\.mount' | awk '{print $1}'` + +for unit in $MOUNT_UNITS +do + # Find BindsTo keyword, pry out FS label from the .device unit name + label=`systemctl show $unit | grep BindsTo | sed "s/.*label\-\(.*\)\.device/\1/;tx;d;:x"` + if [ "$label" == "" ]; then + continue + fi + label_unescaped=$(systemd-escape -u $(systemd-escape -u $label)) + if [ "$label_unescaped" == "$1" ]; then + # If FS label matches restart unit + logger "Open CAS Mount Utility restarting $unit..." 
+ systemctl restart $unit &> /dev/null + exit 0 + fi +done + diff --git a/utils/open-cas-shutdown b/utils/open-cas-shutdown new file mode 100644 index 000000000..2baa5086c --- /dev/null +++ b/utils/open-cas-shutdown @@ -0,0 +1,57 @@ +#!/bin/bash +# +# Copyright(c) 2012-2019 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# +# +# open-cas-shutdown Stops Open CAS +# +# chkconfig: 235 05 95 +# description: Open Cache Acceleration Software Shutdown Trigger +# +# processname: open-cas-shutdown + +### BEGIN INIT INFO +# Provides: open-cas-shutdown +# Required-Start: $local_fs +# Required-Stop: $local_fs +# Default-Start: 2 3 5 +# Default-Stop: 0 1 6 +# Short-Description: Open Cache Acceleration Software Shutdown Trigger +# Description: Open Cache Acceleration Software Shutdown Trigger +### END INIT INFO + +# Execution flow + +runfile=/var/lock/subsys/open-cas-shutdown + +function umount_cache_volumes() +{ + BLOCK_DEV_PREFIX=/dev/cas + INSTANCES=`ls ${BLOCK_DEV_PREFIX}* | egrep [1-9][0-9]*-[1-9][0-9]*` + for inst in $INSTANCES ; do + # Umount any mounted Open CAS devices first + if [[ `cat /etc/mtab | grep $inst | wc -l` -gt 0 ]] ; then + umount $inst &> /dev/null + fi + done +} + +case "$1" in + start|restart|reload) + mkdir -p `dirname $runfile` + touch $runfile + exit 0 + ;; + status) + exit 0 + ;; + stop) + umount_cache_volumes + /sbin/cas stop + rm -f $runfile + exit $? 
+ ;; + *) + exit 1 +esac diff --git a/utils/open-cas-shutdown.service b/utils/open-cas-shutdown.service new file mode 100644 index 000000000..075cdde31 --- /dev/null +++ b/utils/open-cas-shutdown.service @@ -0,0 +1,14 @@ +[Unit] +Description=Open Cache Acceleration Software Shutdown Trigger +After=umount.target +Before=final.target +JobTimeoutSec=604800 +DefaultDependencies=no + +[Service] +Type=oneshot +ExecStart=/sbin/casctl stop +TimeoutStopSec=604800 + +[Install] +WantedBy=final.target diff --git a/utils/open-cas.shutdown b/utils/open-cas.shutdown new file mode 100755 index 000000000..fd88214d6 --- /dev/null +++ b/utils/open-cas.shutdown @@ -0,0 +1,10 @@ +#!/bin/bash +# +# Copyright(c) 2012-2019 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# + +# systemd-shutdown plugin to stop all remaining Open CAS devices + +/usr/bin/echo "Open CAS cleanup handler" > /dev/kmsg +/sbin/casctl stop diff --git a/utils/opencas.conf b/utils/opencas.conf new file mode 100644 index 000000000..f1de7834a --- /dev/null +++ b/utils/opencas.conf @@ -0,0 +1,29 @@ +version=19.3.0 +# Version tag has to be first line in this file +# +# Open CAS configuration file - for reference on syntax +# of this file please refer to appropriate documentation + +# NOTES: +# 1) It is highly recommended to specify cache/core device using path +# that is constant across reboots - e.g. disk device links in +# /dev/disk/by-id/, preferably those using device WWN if available: +# /dev/disk/by-id/wwn-0x123456789abcdef0 +# Referencing devices via /dev/sd* may result in cache misconfiguration after +# system reboot due to change(s) in drive order. + +## Caches configuration section +[caches] +## Cache ID Cache device Cache mode Extra fields (optional) +## Uncomment and edit the below line for cache configuration +#1 /dev/disk/by-id/nvme-INTEL_SSDP.. 
WT + +## Core devices configuration +[cores] +## Cache ID Core ID Core device +## Uncomment and edit the below line for core configuration +#1 1 /dev/disk/by-id/wwn-0x123456789abcdef0 + +## To specify use of the IO Classification file, place content of the following line in the +## Caches configuration section under Extra fields (optional) +## ioclass_file=/etc/opencas/ioclass-config.csv diff --git a/utils/opencas.conf.5 b/utils/opencas.conf.5 new file mode 100644 index 000000000..4b4ab6454 --- /dev/null +++ b/utils/opencas.conf.5 @@ -0,0 +1,61 @@ +.TH opencas.conf 5 __CAS_DATE__ v__CAS_VERSION__ +.SH NAME +opencas.conf \- cas configuration file. + + +.SH SYNOPSIS +.B /etc/opencas/opencas.conf + +.SH COPYRIGHT +Copyright(c) 2012-2019 by the Intel Corporation. + +.SH DESCRIPTION +Contains configurations for cache and core devices to run cache on system startup. Consists of the following sections: +.RS 3 +.TP +\fBversion\fR First line has to be version tag. +.TP +\fB[caches]\fR Caches configuration. Following columns are required: +.RS 5 +.IP +Cache ID <1-16384> +.br +Cache device +.br +Cache mode {wt|wb|wa|pt} +.br +Extra fields (optional) ioclass_file=,cleaning_policy= +.RE +.TP +\fB[cores]\fR Cores configuration. Following columns are required: +.RS 5 +.IP +Cache ID <1-16384> +.br +Core ID <0-4095> +.br +Core device +.br +.RE +.TP +\fBNOTES\fR +.RS +1) It is highly recommended to specify cache/core device using path that is constant across reboots - e.g. disk device links in /dev/disk/by-id/, preferably those using device WWN if available: /dev/disk/by-id/wwn-0x123456789abcdef0. Referencing devices via /dev/sd* may result in cache misconfiguration after system reboot due to change(s) in drive order. 
.TP +2) To specify use of the IO Classification file, place ioclass_file=path/to/file.csv in caches configuration section under Extra fields (optional) + + +.SH FILES +.TP +/etc/opencas/opencas.conf +Contains configurations for cache and core devices to run cache on system startup or via cas tool. + +.SH REPORTING BUGS +Patches and issues may be submitted to the official repository at +\fBhttps://open-cas.github.io\fR + + +.SH SEE ALSO +.TP +casctl(8), casadm(8) + diff --git a/utils/opencas.py b/utils/opencas.py new file mode 100644 index 000000000..6068f4e67 --- /dev/null +++ b/utils/opencas.py @@ -0,0 +1,678 @@ +#!/usr/bin/env python2 +# +# Copyright(c) 2012-2019 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# + +import subprocess +import csv +import re +import os +import stat + +# Casadm functionality + + +class casadm: + casadm_path = '/sbin/casadm' + + class result: + def __init__(self, cmd): + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + self.exit_code = p.wait() + output = p.communicate() + self.stdout = output[0] + self.stderr = output[1] + + class CasadmError(Exception): + def __init__(self, result): + super(casadm.CasadmError, self).__init__('casadm error') + self.result = result + + @classmethod + def run_cmd(cls, cmd): + result = cls.result(cmd) + if result.exit_code != 0: + raise cls.CasadmError(result) + return result + + @classmethod + def get_version(cls): + cmd = [cls.casadm_path, + '--version', + '--output-format', 'csv'] + return cls.run_cmd(cmd) + + @classmethod + def list_caches(cls): + cmd = [cls.casadm_path, + '--list-caches', + '--output-format', 'csv'] + return cls.run_cmd(cmd) + + @classmethod + def check_cache_device(cls, device): + cmd = [cls.casadm_path, + '--script', + '--check-cache-device', + '--cache-device', device] + return cls.run_cmd(cmd) + + @classmethod + def start_cache(cls, device, cache_id=None, cache_mode=None, + cache_line_size=None, load=False, force=False): + cmd = 
[cls.casadm_path, + '--start-cache', + '--cache-device', device] + if cache_id: + cmd += ['--cache-id', str(cache_id)] + if cache_mode: + cmd += ['--cache-mode', cache_mode] + if cache_line_size: + cmd += ['--cache-line-size', str(cache_line_size)] + if load: + cmd += ['--load'] + if force: + cmd += ['--force'] + return cls.run_cmd(cmd) + + @classmethod + def add_core(cls, device, cache_id, core_id=None, try_add=False): + cmd = [cls.casadm_path, + '--script', + '--add-core', + '--core-device', device, + '--cache-id', str(cache_id)] + if core_id is not None: + cmd += ['--core-id', str(core_id)] + if try_add: + cmd += ['--try-add'] + return cls.run_cmd(cmd) + + @classmethod + def stop_cache(cls, cache_id, no_flush=False): + cmd = [cls.casadm_path, + '--stop-cache', + '--cache-id', str(cache_id)] + if no_flush: + cmd += ['--no-data-flush'] + return cls.run_cmd(cmd) + + @classmethod + def remove_core(cls, cache_id, core_id, detach=False, force=False): + cmd = [cls.casadm_path, + '--script', + '--remove-core', + '--cache-id', str(cache_id), + '--core-id', str(core_id)] + if detach: + cmd += ['--detach'] + if force: + cmd += ['--no-flush'] + return cls.run_cmd(cmd) + + @classmethod + def set_param(cls, namespace, cache_id, **kwargs): + cmd = [cls.casadm_path, + '--set-param', '--name', namespace, + '--cache-id', str(cache_id)] + + for param, value in kwargs.items(): + cmd += ['--'+param.replace('_', '-'), str(value)] + + return cls.run_cmd(cmd) + + @classmethod + def get_params(cls, namespace, cache_id, **kwargs): + cmd = [cls.casadm_path, + '--get-param', '--name', namespace, + '--cache-id', str(cache_id)] + + for param, value in kwargs.items(): + cmd += ['--'+param.replace('_', '-'), str(value)] + + cmd += ['-o', 'csv'] + + return cls.run_cmd(cmd) + + @classmethod + def flush_parameters(cls, cache_id, policy_type): + cmd = [cls.casadm_path, + '--flush-parameters', + '--cache-id', str(cache_id), + '--cleaning-policy-type', policy_type] + return cls.run_cmd(cmd) + + 
@classmethod + def io_class_load_config(cls, cache_id, ioclass_file): + cmd = [cls.casadm_path, + '--io-class', + '--load-config', + '--cache-id', str(cache_id), + '--file', ioclass_file] + return cls.run_cmd(cmd) + +# Configuration file parser + + +class cas_config(object): + default_location = '/etc/opencas/opencas.conf' + + class ConflictingConfigException(ValueError): + pass + + class AlreadyConfiguredException(ValueError): + pass + + @staticmethod + def get_by_id_path(path): + for id_path in os.listdir('/dev/disk/by-id'): + full_path = '/dev/disk/by-id/{0}'.format(id_path) + if os.path.realpath(full_path) == os.path.realpath(path): + return full_path + + raise ValueError('By-id device link not found for {0}'.format(path)) + + @staticmethod + def check_block_device(path): + if not os.path.exists(path) and path.startswith('/dev/cas'): + return + + try: + mode = os.stat(path).st_mode + except: + raise ValueError('{0} not found'.format(path)) + + if not stat.S_ISBLK(mode): + raise ValueError('{0} is not block device'.format(path)) + + class cache_config(object): + def __init__(self, cache_id, device, cache_mode, **params): + self.cache_id = int(cache_id) + self.device = device + self.cache_mode = cache_mode + self.params = params + self.cores = dict() + + @classmethod + def from_line(cls, line, allow_incomplete=False): + values = line.split() + if len(values) < 3: + raise ValueError('Invalid cache configuration (too few columns)') + elif len(values) > 4: + raise ValueError('Invalid cache configuration (too many columns)') + + cache_id = int(values[0]) + device = values[1] + cache_mode = values[2].lower() + + params = dict() + if len(values) > 3: + for param in values[3].split(','): + param_name, param_value = param.split('=') + if param_name in params: + raise ValueError('Invalid cache configuration (repeated parameter)') + params[param_name] = param_value + + cache_config = cls(cache_id, device, cache_mode, **params) + cache_config.validate_config(False, 
allow_incomplete) + + return cache_config + + def validate_config(self, force, allow_incomplete=False): + type(self).check_cache_id_valid(self.cache_id) + self.check_recursive() + self.check_cache_mode_valid(self.cache_mode) + for param_name, param_value in self.params.iteritems(): + self.validate_parameter(param_name, param_value) + + if not allow_incomplete: + cas_config.check_block_device(self.device) + if not force: + self.check_cache_device_empty() + + def validate_parameter(self, param_name, param_value): + if param_name == 'ioclass_file': + if not os.path.exists(param_value): + raise ValueError('Incorrect path to io_class file') + elif param_name == 'cleaning_policy': + self.check_cleaning_policy_valid(param_value) + elif param_name == 'cache_line_size': + self.check_cache_line_size_valid(param_value) + else: + raise ValueError('{0} is unknown parameter name'.format(param_name)) + + @staticmethod + def check_cache_id_valid(cache_id): + if not 1 <= int(cache_id) <= 16384: + raise ValueError('{0} is invalid cache id'.format(cache_id)) + + def check_cache_device_empty(self): + try: + result = casadm.run_cmd(['lsblk', '-o', 'NAME', '-l', '-n', self.device]) + except: + # lsblk returns non-0 if it can't probe for partitions + # this means that we're probably dealing with atomic device + # let it through + return + + if len(list(filter(lambda a: a != '', result.stdout.split('\n')))) > 1: + raise ValueError( + 'Partitions found on device {0}. Use force option to ignore'. 
+ format(self.device)) + + def check_cache_mode_valid(self, cache_mode): + if cache_mode.lower() not in ['wt', 'pt', 'wa', 'wb']: + raise ValueError('Invalid cache mode {0}'.format(cache_mode)) + + def check_cleaning_policy_valid(self, cleaning_policy): + if cleaning_policy.lower() not in ['acp', 'alru', 'nop']: + raise ValueError('{0} is invalid cleaning policy name'.format( + cleaning_policy)) + + def check_cache_line_size_valid(self, cache_line_size): + if cache_line_size not in ['4', '8', '16', '32', '64']: + raise ValueError('{0} is invalid cache line size'.format( + cache_line_size)) + + def check_recursive(self): + if not self.device.startswith('/dev/cas'): + return + + ids = self.device.split('/dev/cas')[1] + device_cache_id, _ = ids.split('-') + + if int(device_cache_id) == self.cache_id: + raise ValueError('Recursive configuration detected') + + def to_line(self): + ret = '{0}\t{1}\t{2}'.format(self.cache_id, self.device, self.cache_mode) + if len(self.params) > 0: + i = 0 + for param, value in self.params.iteritems(): + if i > 0: + ret += ',' + else: + ret += '\t' + + ret += '{0}={1}'.format(param, value) + i += 1 + ret += '\n' + + return ret + + class core_config(object): + def __init__(self, cache_id, core_id, path): + self.cache_id = int(cache_id) + self.core_id = int(core_id) + self.device = path + + @classmethod + def from_line(cls, line, allow_incomplete=False): + values = line.split() + if len(values) > 3: + raise ValueError('Invalid core configuration (too many columns)') + elif len(values) < 3: + raise ValueError('Invalid core configuration (too few columns)') + + cache_id = int(values[0]) + core_id = int(values[1]) + device = values[2] + + core_config = cls(cache_id, core_id, device) + + core_config.validate_config(allow_incomplete) + + return core_config + + def validate_config(self, allow_incomplete=False): + self.check_core_id_valid() + self.check_recursive() + cas_config.cache_config.check_cache_id_valid(self.cache_id) + if not 
allow_incomplete: + cas_config.check_block_device(self.device) + + def check_core_id_valid(self): + if not 0 <= int(self.core_id) <= 4095: + raise ValueError('{0} is invalid core id'.format(self.core_id)) + + def check_recursive(self): + if not self.device.startswith('/dev/cas'): + return + + ids = self.device.split('/dev/cas')[1] + device_cache_id, _ = ids.split('-') + + if int(device_cache_id) == self.cache_id: + raise ValueError('Recursive configuration detected') + + def to_line(self): + return '{0}\t{1}\t{2}\n'.format(self.cache_id, self.core_id, self.device) + + def __init__(self, caches=None, cores=None, version_tag=None): + self.caches = caches if caches else dict() + + self.cores = cores if cores else list() + + self.version_tag = version_tag + + @classmethod + def from_file(cls, config_file, allow_incomplete=False): + section_caches = False + section_cores = False + + try: + with open(config_file, 'r') as conf: + version_tag = conf.readline() + if not re.findall(r'^version=.*$', version_tag): + raise ValueError('No version tag found!') + + config = cls(version_tag=version_tag) + + for line in conf: + line = line.split('#')[0].rstrip() + if not line: + continue + + if line == '[caches]': + section_caches = True + continue + + if line == '[cores]': + section_caches = False + section_cores = True + continue + + if section_caches: + cache = cas_config.cache_config.from_line(line, allow_incomplete) + config.insert_cache(cache) + elif section_cores: + core = cas_config.core_config.from_line(line, allow_incomplete) + config.insert_core(core) + except ValueError: + raise + except IOError: + raise Exception('Couldn\'t open config file') + except: + raise + + return config + + def insert_cache(self, new_cache_config): + if new_cache_config.cache_id in self.caches: + if (os.path.realpath(self.caches[new_cache_config.cache_id].device) + != os.path.realpath(new_cache_config.device)): + raise cas_config.ConflictingConfigException( + 'Other cache device configured under 
this id') + else: + raise cas_config.AlreadyConfiguredException( + 'Cache already configured') + + for cache_id, cache in self.caches.iteritems(): + if cache_id != new_cache_config.cache_id: + if (os.path.realpath(new_cache_config.device) + == os.path.realpath(cache.device)): + raise cas_config.ConflictingConfigException( + 'This cache device is already configured as a cache') + + for _, core in cache.cores.iteritems(): + if (os.path.realpath(core.device) + == os.path.realpath(new_cache_config.device)): + raise cas_config.ConflictingConfigException( + 'This cache device is already configured as a core') + + try: + new_cache_config.device = cas_config.get_by_id_path(new_cache_config.device) + except: + pass + + self.caches[new_cache_config.cache_id] = new_cache_config + + def insert_core(self, new_core_config): + if new_core_config.cache_id not in self.caches: + raise KeyError('Cache id {0} doesn\'t exist'.format(new_core_config.cache_id)) + + try: + for cache_id, cache in self.caches.iteritems(): + if (os.path.realpath(cache.device) + == os.path.realpath(new_core_config.device)): + raise cas_config.ConflictingConfigException( + 'Core device already configured as a cache') + + for core_id, core in cache.cores.iteritems(): + if (cache_id == new_core_config.cache_id + and core_id == new_core_config.core_id): + if (os.path.realpath(core.device) + == os.path.realpath(new_core_config.device)): + raise cas_config.AlreadyConfiguredException( + 'Core already configured') + else: + raise cas_config.ConflictingConfigException( + 'Other core device configured under this id') + else: + if (os.path.realpath(core.device) + == os.path.realpath(new_core_config.device)): + raise cas_config.ConflictingConfigException( + 'This core device is already configured as a core') + except KeyError: + pass + + try: + new_core_config.device = cas_config.get_by_id_path(new_core_config.device) + except: + pass + + self.caches[new_core_config.cache_id].cores[new_core_config.core_id] = 
new_core_config + self.cores += [new_core_config] + + def is_empty(self): + if len(self.caches) > 0 or len(self.cores) > 0: + return False + + return True + + def write(self, config_file): + try: + with open(config_file, 'w') as conf: + conf.write('{0}\n'.format(self.version_tag)) + conf.write('# This config was automatically generated\n') + + conf.write('[caches]\n') + for _, cache in self.caches.iteritems(): + conf.write(cache.to_line()) + + conf.write('\n[cores]\n') + for core in self.cores: + conf.write(core.to_line()) + + except: + raise Exception('Couldn\'t write config file') + +# Config helper functions + + +def start_cache(cache, load, force=False): + casadm.start_cache( + device=cache.device, + cache_id=cache.cache_id, + cache_mode=cache.cache_mode, + cache_line_size=cache.params.get('cache_line_size'), + load=load, + force=force) + +def configure_cache(cache): + if cache.params.has_key('cleaning_policy'): + casadm.set_param('cleaning', cache_id=cache.cache_id, + policy=cache.params['cleaning_policy']) + if cache.params.has_key('ioclass_file'): + casadm.io_class_load_config(cache_id=cache.cache_id, + ioclass_file=cache.params['ioclass_file']) + +def add_core(core, attach): + casadm.add_core( + device=core.device, + cache_id=core.cache_id, + core_id=core.core_id, + try_add=attach) + +# Another helper functions + +def is_cache_started(cache_config): + dev_list = get_caches_list() + for dev in dev_list: + if dev['type'] == 'cache' and int(dev['id']) == cache_config.cache_id: + return True + + return False + +def is_core_added(core_config): + dev_list = get_caches_list() + cache_id = 0 + for dev in dev_list: + if dev['type'] == 'cache': + cache_id = int(dev['id']) + + if (dev['type'] == 'core' and + cache_id == core_config.cache_id and + int(dev['id']) == core_config.core_id): + return True + + return False + +def get_caches_list(): + result = casadm.list_caches() + return list(csv.DictReader(result.stdout.split('\n'))) + +def check_cache_device(device): + 
result = casadm.check_cache_device(device) + return list(csv.DictReader(result.stdout.split('\n')))[0] + +def get_cas_version(): + version = casadm.get_version() + + ret = {} + for line in version.stdout.split('\n')[1:]: + try: + component, version = line.split(',') + except: + continue + ret[component] = version + + return ret + + +class CompoundException(Exception): + def __init__(self): + super(CompoundException, self).__init__() + self.exception_list = list() + + def __str__(self): + s = "Multiple exceptions occured:\n" if len(self.exception_list) > 1 else "" + + for e in self.exception_list: + s += '{0}\n'.format(str(e)) + + return s + + def add_exception(self, e): + if type(e) is CompoundException: + self.exception_list += e.exception_list + else: + self.exception_list += [e] + + def is_empty(self): + return len(self.exception_list) == 0 + + def raise_nonempty(self): + if self.is_empty(): + return + else: + raise self + +def detach_core_recursive(cache_id, core_id, flush): + # Catching exceptions is left to uppermost caller of detach_core_recursive + # as the immediate caller that made a recursive call depends on the callee + # to remove core and thus release reference to lower level cache volume. + l_cache_id = '' + for dev in get_caches_list(): + if dev['type'] == 'cache': + l_cache_id = dev['id'] + elif dev['type'] == 'core' and dev['status'] == 'Active': + if '/dev/cas{0}-{1}'.format(cache_id, core_id) in dev['disk']: + detach_core_recursive(l_cache_id, dev['id'], flush) + elif l_cache_id == cache_id and dev['id'] == core_id and dev['status'] != 'Active': + return + + casadm.remove_core(cache_id, core_id, detach = True, force = not flush) + +def detach_all_cores(flush): + error = CompoundException() + + try: + dev_list = get_caches_list() + except casadm.CasadmError as e: + raise Exception('Unable to list caches. 
Reason:\n{0}'.format( + e.result.stderr)) + except: + raise Exception('Unable to list caches.') + + for dev in dev_list: + if dev['type'] == 'cache': + cache_id = dev['id'] + elif dev['type'] == 'core' and dev['status'] == "Active": + # In case of exception we proceed with detaching remaining core instances + # to gracefully shutdown as many cache instances as possible. + try: + detach_core_recursive(cache_id, dev['id'], flush) + except casadm.CasadmError as e: + error.add_exception(Exception( + 'Unable to detach core {0}. Reason:\n{1}'.format( + dev['disk'], e.result.stderr))) + except: + error.add_exception(Exception( + 'Unable to detach core {0}.'.format(dev['disk']))) + + error.raise_nonempty() + +def stop_all_caches(flush): + error = CompoundException() + + try: + dev_list = get_caches_list() + except casadm.CasadmError as e: + raise Exception('Unable to list caches. Reason:\n{0}'.format( + e.result.stderr)) + except: + raise Exception('Unable to list caches.') + + for dev in dev_list: + if dev['type'] == 'cache': + # In case of exception we proceed with stopping subsequent cache instances + # to gracefully shutdown as many cache instances as possible. + try: + casadm.stop_cache(dev['id'], not flush) + except casadm.CasadmError as e: + error.add_exception(Exception( + 'Unable to stop cache {0}. Reason:\n{1}'.format( + dev['disk'], e.result.stderr))) + except: + error.add_exception(Exception( + 'Unable to stop cache {0}.'.format(dev['disk']))) + + error.raise_nonempty() + +def stop(flush): + error = CompoundException() + + try: + detach_all_cores(flush) + except Exception as e: + error.add_exception(e) + + try: + stop_all_caches(False) + except Exception as e: + error.add_exception(e) + + error.raise_nonempty() +