From 460cd461d3ffd0376690c56dfceb7cca5399776c Mon Sep 17 00:00:00 2001 From: Robert Baldyga Date: Mon, 25 Sep 2023 15:07:30 +0200 Subject: [PATCH 1/3] Allocate requests for management path separately Management path does not benefit much from mpools, as number of requests allocated is very small. It's less restrictive (mngt_queue does not have single-CPU affinity) thus avoiding mpool usage in management path allows to introduce additional restrictions on mpool, leading to I/O performance improvement. Signed-off-by: Robert Baldyga Signed-off-by: Michal Mielewczyk --- src/metadata/metadata_raw_dynamic.c | 2 +- src/mngt/ocf_mngt_flush.c | 2 +- src/ocf_core.c | 2 +- src/ocf_core_priv.h | 3 + src/ocf_request.c | 126 +++++++++++++++++++++++----- src/ocf_request.h | 25 ++++++ src/utils/utils_cleaner.c | 3 +- src/utils/utils_parallelize.c | 3 +- src/utils/utils_pipeline.c | 2 +- 9 files changed, 138 insertions(+), 30 deletions(-) diff --git a/src/metadata/metadata_raw_dynamic.c b/src/metadata/metadata_raw_dynamic.c index d426ff98..bde407b1 100644 --- a/src/metadata/metadata_raw_dynamic.c +++ b/src/metadata/metadata_raw_dynamic.c @@ -493,7 +493,7 @@ void raw_dynamic_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw, goto err_zpage; } - context->req = ocf_req_new(cache->mngt_queue, NULL, 0, 0, 0); + context->req = ocf_req_new_mngt(cache->mngt_queue); if (!context->req) { result = -OCF_ERR_NO_MEM; goto err_req; diff --git a/src/mngt/ocf_mngt_flush.c b/src/mngt/ocf_mngt_flush.c index 84bebc5e..d4dcda33 100644 --- a/src/mngt/ocf_mngt_flush.c +++ b/src/mngt/ocf_mngt_flush.c @@ -432,7 +432,7 @@ static void _ocf_mngt_flush_container( fc->end = end; fc->context = context; - req = ocf_req_new(cache->mngt_queue, NULL, 0, 0, 0); + req = ocf_req_new_mngt(cache->mngt_queue); if (!req) { error = OCF_ERR_NO_MEM; goto finish; diff --git a/src/ocf_core.c b/src/ocf_core.c index f7d1481b..38f324a6 100644 --- a/src/ocf_core.c +++ b/src/ocf_core.c @@ -154,7 +154,7 @@ static uint64_t 
_calc_dirty_for(uint64_t dirty_since) return dirty_since ? (current_time - dirty_since) : 0; } -static inline struct ocf_request *ocf_io_to_req(struct ocf_io *io) +struct ocf_request *ocf_io_to_req(struct ocf_io *io) { struct ocf_io_internal *ioi; diff --git a/src/ocf_core_priv.h b/src/ocf_core_priv.h index 879d93c2..da66e082 100644 --- a/src/ocf_core_priv.h +++ b/src/ocf_core_priv.h @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2021 Intel Corporation + * Copyright(c) 2024 Huawei Technologies * SPDX-License-Identifier: BSD-3-Clause */ @@ -105,4 +106,6 @@ ocf_core_id_t ocf_core_get_id(ocf_core_t core); int ocf_core_volume_type_init(ocf_ctx_t ctx); +struct ocf_request *ocf_io_to_req(struct ocf_io *io); + #endif /* __OCF_CORE_PRIV_H__ */ diff --git a/src/ocf_request.c b/src/ocf_request.c index cb82561c..682a3512 100644 --- a/src/ocf_request.c +++ b/src/ocf_request.c @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2022 Intel Corporation + * Copyright(c) 2024 Huawei Technologies * SPDX-License-Identifier: BSD-3-Clause */ @@ -7,6 +8,7 @@ #include "ocf_request.h" #include "ocf_cache_priv.h" #include "concurrency/ocf_metadata_concurrency.h" +#include "engine/engine_common.h" #include "utils/utils_cache_line.h" #define OCF_UTILS_RQ_DEBUG 0 @@ -34,9 +36,8 @@ enum ocf_req_size { ocf_req_size_128, }; -static inline size_t ocf_req_sizeof_map(struct ocf_request *req) +static inline size_t ocf_req_sizeof_map(uint32_t lines) { - uint32_t lines = req->core_line_count; size_t size = (lines * sizeof(struct ocf_map_info)); ENV_BUG_ON(lines == 0); @@ -80,6 +81,88 @@ void ocf_req_allocator_deinit(struct ocf_ctx *ocf_ctx) ocf_ctx->resources.req = NULL; } +static inline void ocf_req_init(struct ocf_request *req, ocf_queue_t queue, + ocf_core_t core, uint64_t addr, uint32_t bytes, int rw) +{ + req->io_queue = queue; + + req->core = core; + req->cache = queue->cache; + + env_atomic_set(&req->ref_count, 1); + + req->byte_position = addr; + req->byte_length = bytes; + req->rw = rw; +} + +struct ocf_request 
*ocf_req_new_mngt(ocf_queue_t queue) +{ + struct ocf_request *req; + + req = env_zalloc(sizeof(*req), ENV_MEM_NORMAL); + if (unlikely(!req)) + return NULL; + + ocf_queue_get(queue); + + ocf_req_init(req, queue, NULL, 0, 0, 0); + + req->is_mngt = true; + + return req; +} + +struct ocf_request *ocf_req_new_cleaner(ocf_queue_t queue, uint32_t count) +{ + ocf_cache_t cache = queue->cache; + struct ocf_request *req; + bool map_allocated = true, is_mngt = false; + + if (!ocf_refcnt_inc(&cache->refcnt.metadata)) + return NULL; + + if (unlikely(queue == cache->mngt_queue)) { + req = env_zalloc(sizeof(*req) + ocf_req_sizeof_map(count) + + ocf_req_sizeof_alock_status(count), + ENV_MEM_NORMAL); + is_mngt = true; + } else { + req = env_mpool_new(cache->owner->resources.req, count); + if (!req) { + map_allocated = false; + req = env_mpool_new(cache->owner->resources.req, 1); + } + } + + if (!req) { + ocf_refcnt_dec(&cache->refcnt.metadata); + return NULL; + } + req->is_mngt = is_mngt; + + ocf_queue_get(queue); + + ocf_req_init(req, queue, NULL, 0, 0, OCF_READ); + + if (map_allocated) { + req->map = req->__map; + req->alock_status = (uint8_t*)&req->__map[count]; + req->alloc_core_line_count = count; + } else { + req->alloc_core_line_count = 1; + } + req->core_line_count = count; + req->lock_idx = ocf_metadata_concurrency_next_idx(queue); + req->cleaner = true; + + if (ocf_req_alloc_map(req)) { + ocf_req_put(req); + req = NULL; + } + return req; +} + struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core, uint64_t addr, uint32_t bytes, int rw) { @@ -88,6 +171,8 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core, struct ocf_request *req; bool map_allocated = true; + ENV_BUG_ON(queue == cache->mngt_queue); + if (likely(bytes)) { core_line_first = ocf_bytes_2_lines(cache, addr); core_line_last = ocf_bytes_2_lines(cache, addr + bytes - 1); @@ -115,32 +200,24 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core, 
req->alloc_core_line_count = 1; } - OCF_DEBUG_TRACE(cache); ocf_queue_get(queue); - req->io_queue = queue; - req->core = core; - req->cache = cache; + ocf_req_init(req, queue, core, addr, bytes, rw); - req->d2c = (queue != cache->mngt_queue) && !ocf_refcnt_inc( - &cache->refcnt.metadata); + req->d2c = !ocf_refcnt_inc(&cache->refcnt.metadata); - env_atomic_set(&req->ref_count, 1); - - req->byte_position = addr; - req->byte_length = bytes; req->core_line_first = core_line_first; req->core_line_last = core_line_last; req->core_line_count = core_line_count; - req->rw = rw; - req->part_id = PARTITION_DEFAULT; req->discard.sector = BYTES_TO_SECTORS(addr); req->discard.nr_sects = BYTES_TO_SECTORS(bytes); req->discard.handled = 0; + req->part_id = PARTITION_DEFAULT; + req->lock_idx = ocf_metadata_concurrency_next_idx(queue); return req; @@ -148,10 +225,12 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core, int ocf_req_alloc_map(struct ocf_request *req) { + uint32_t lines = req->core_line_count; + if (req->map) return 0; - req->map = env_zalloc(ocf_req_sizeof_map(req) + + req->map = env_zalloc(ocf_req_sizeof_map(lines) + ocf_req_sizeof_alock_status(req->core_line_count), ENV_MEM_NOIO); if (!req->map) { @@ -159,7 +238,7 @@ int ocf_req_alloc_map(struct ocf_request *req) return -OCF_ERR_NO_MEM; } - req->alock_status = &((uint8_t*)req->map)[ocf_req_sizeof_map(req)]; + req->alock_status = &((uint8_t*)req->map)[ocf_req_sizeof_map(lines)]; return 0; } @@ -229,14 +308,17 @@ void ocf_req_put(struct ocf_request *req) OCF_DEBUG_TRACE(req->cache); - if (!req->d2c && req->io_queue != req->cache->mngt_queue) + if ((!req->d2c && !req->is_mngt) || req->cleaner) ocf_refcnt_dec(&req->cache->refcnt.metadata); - if (req->map != req->__map) - env_free(req->map); - - env_mpool_del(req->cache->owner->resources.req, req, - req->alloc_core_line_count); + if (unlikely(req->is_mngt)) { + env_free(req); + } else { + if (req->map != req->__map) + env_free(req->map); + 
env_mpool_del(req->cache->owner->resources.req, req, + req->alloc_core_line_count); + } ocf_queue_put(queue); } diff --git a/src/ocf_request.h b/src/ocf_request.h index 4c7f257a..ae3d87dc 100644 --- a/src/ocf_request.h +++ b/src/ocf_request.h @@ -198,6 +198,9 @@ struct ocf_request { uint8_t d2c : 1; /**!< request affects metadata cachelines (is not direct-to-core) */ + uint8_t cleaner : 1; + /**!< request allocated by cleaner */ + uint8_t dirty : 1; /**!< indicates that request produces dirty data */ @@ -228,6 +231,9 @@ struct ocf_request { uint8_t is_deferred : 1; /* !< request handling was deferred and eventually resumed */ + uint8_t is_mngt : 1; + /* !< It's a management path request */ + ocf_req_cache_mode_t cache_mode; uint64_t timestamp; @@ -275,6 +281,25 @@ int ocf_req_allocator_init(struct ocf_ctx *ocf_ctx); */ void ocf_req_allocator_deinit(struct ocf_ctx *ocf_ctx); +/** + * @brief Allocate new OCF request for the management path + * + * @param queue - I/O queue handle + * + * @return new OCF request + */ +struct ocf_request *ocf_req_new_mngt(ocf_queue_t queue); + +/** + * @brief Allocate new OCF request for cleaner + * + * @param queue - I/O queue handle + * @param count - Number of map entries + * + * @return new OCF request + */ +struct ocf_request *ocf_req_new_cleaner(ocf_queue_t queue, uint32_t count); + /** * @brief Allocate new OCF request * diff --git a/src/utils/utils_cleaner.c b/src/utils/utils_cleaner.c index aab6548d..2d3240dc 100644 --- a/src/utils/utils_cleaner.c +++ b/src/utils/utils_cleaner.c @@ -40,8 +40,7 @@ static struct ocf_request *_ocf_cleaner_alloc_req(struct ocf_cache *cache, uint32_t count, const struct ocf_cleaner_attribs *attribs) { - struct ocf_request *req = ocf_req_new_extended(attribs->io_queue, NULL, - 0, count * ocf_line_size(cache), OCF_READ); + struct ocf_request *req = ocf_req_new_cleaner(attribs->io_queue, count); int ret; if (!req) diff --git a/src/utils/utils_parallelize.c b/src/utils/utils_parallelize.c index 
e22d4367..53db392e 100644 --- a/src/utils/utils_parallelize.c +++ b/src/utils/utils_parallelize.c @@ -105,8 +105,7 @@ int ocf_parallelize_create(ocf_parallelize_t *parallelize, } else { queue = cache->mngt_queue; } - tmp_parallelize->reqs[i] = ocf_req_new(queue, - NULL, 0, 0, 0); + tmp_parallelize->reqs[i] = ocf_req_new_mngt(queue); if (!tmp_parallelize->reqs[i]) { result = -OCF_ERR_NO_MEM; goto err_reqs; diff --git a/src/utils/utils_pipeline.c b/src/utils/utils_pipeline.c index 2e176161..ee0698e8 100644 --- a/src/utils/utils_pipeline.c +++ b/src/utils/utils_pipeline.c @@ -87,7 +87,7 @@ int ocf_pipeline_create(ocf_pipeline_t *pipeline, ocf_cache_t cache, tmp_pipeline->priv = (void *)priv; } - req = ocf_req_new(cache->mngt_queue, NULL, 0, 0, 0); + req = ocf_req_new_mngt(cache->mngt_queue); if (!req) { env_vfree(tmp_pipeline); return -OCF_ERR_NO_MEM; From 8b93b699c3433f2a8f0fd774da857e7e3361ed22 Mon Sep 17 00:00:00 2001 From: Robert Baldyga Date: Mon, 19 Feb 2024 22:25:57 +0100 Subject: [PATCH 2/3] Eliminate queue -> cache mapping Eliminate need to resolve cache based on the queue. This allows to share the queue between cache instances. The queue still holds pointer to a cache that owns the queue, but no management or io path relies on the queue -> cache mapping. 
Signed-off-by: Robert Baldyga Signed-off-by: Michal Mielewczyk --- inc/ocf_queue.h | 8 ++-- src/metadata/metadata_io.c | 2 +- src/metadata/metadata_raw_dynamic.c | 2 +- src/mngt/ocf_mngt_flush.c | 2 +- src/ocf_core.c | 3 +- src/ocf_queue.c | 13 +++-- src/ocf_request.c | 74 ++++++++++++++++++++++++----- src/ocf_request.h | 19 +++++++- src/utils/utils_cleaner.c | 3 +- src/utils/utils_parallelize.c | 2 +- src/utils/utils_pipeline.c | 2 +- 11 files changed, 98 insertions(+), 32 deletions(-) diff --git a/inc/ocf_queue.h b/inc/ocf_queue.h index 2a59103d..47d9d4c2 100644 --- a/inc/ocf_queue.h +++ b/inc/ocf_queue.h @@ -153,12 +153,12 @@ void *ocf_queue_get_priv(ocf_queue_t q); uint32_t ocf_queue_pending_io(ocf_queue_t q); /** - * @brief Get cache instance to which I/O queue belongs + * @brief Return if queue is management queue * - * @param[in] q I/O queue + * @param[in] queue - queue object * - * @retval Cache instance + * @retval true - if management queue, otherwise false */ -ocf_cache_t ocf_queue_get_cache(ocf_queue_t q); +bool ocf_queue_is_mngt(ocf_queue_t queue); #endif diff --git a/src/metadata/metadata_io.c b/src/metadata/metadata_io.c index 9be1d2fe..b4521a93 100644 --- a/src/metadata/metadata_io.c +++ b/src/metadata/metadata_io.c @@ -156,7 +156,7 @@ int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue, void *priv, if (!context) return -OCF_ERR_NO_MEM; - context->req = ocf_req_new(queue, NULL, 0, 0, 0); + context->req = ocf_req_new_mngt(cache, queue); if (!context->req) { env_vfree(context); return -OCF_ERR_NO_MEM; diff --git a/src/metadata/metadata_raw_dynamic.c b/src/metadata/metadata_raw_dynamic.c index bde407b1..9e394c88 100644 --- a/src/metadata/metadata_raw_dynamic.c +++ b/src/metadata/metadata_raw_dynamic.c @@ -493,7 +493,7 @@ void raw_dynamic_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw, goto err_zpage; } - context->req = ocf_req_new_mngt(cache->mngt_queue); + context->req = ocf_req_new_mngt(cache, cache->mngt_queue); if 
(!context->req) { result = -OCF_ERR_NO_MEM; goto err_req; diff --git a/src/mngt/ocf_mngt_flush.c b/src/mngt/ocf_mngt_flush.c index d4dcda33..a0ca152d 100644 --- a/src/mngt/ocf_mngt_flush.c +++ b/src/mngt/ocf_mngt_flush.c @@ -432,7 +432,7 @@ static void _ocf_mngt_flush_container( fc->end = end; fc->context = context; - req = ocf_req_new_mngt(cache->mngt_queue); + req = ocf_req_new_mngt(cache, cache->mngt_queue); if (!req) { error = OCF_ERR_NO_MEM; goto finish; diff --git a/src/ocf_core.c b/src/ocf_core.c index 38f324a6..4088260e 100644 --- a/src/ocf_core.c +++ b/src/ocf_core.c @@ -489,9 +489,10 @@ static void *ocf_core_io_allocator_new(ocf_io_allocator_t allocator, ocf_volume_t volume, ocf_queue_t queue, uint64_t addr, uint32_t bytes, uint32_t dir) { + ocf_core_t core = ocf_volume_to_core(volume); struct ocf_request *req; - req = ocf_req_new(queue, NULL, addr, bytes, dir); + req = ocf_req_new(queue, core, addr, bytes, dir); if (!req) return NULL; diff --git a/src/ocf_queue.c b/src/ocf_queue.c index cb985aaa..c77524a5 100644 --- a/src/ocf_queue.c +++ b/src/ocf_queue.c @@ -126,6 +126,11 @@ int ocf_queue_create_mngt(ocf_cache_t cache, ocf_queue_t *queue, return 0; } +bool ocf_queue_is_mngt(ocf_queue_t queue) +{ + return queue == queue->cache->mngt_queue; +} + void ocf_queue_get(ocf_queue_t queue) { OCF_CHECK_NULL(queue); @@ -144,7 +149,7 @@ void ocf_queue_put(ocf_queue_t queue) return; queue->ops->stop(queue); - if (queue != queue->cache->mngt_queue) { + if (!ocf_queue_is_mngt(queue)) { env_spinlock_lock_irqsave(&cache->io_queues_lock, flags); list_del(&queue->list); env_spinlock_unlock_irqrestore(&cache->io_queues_lock, flags); @@ -247,12 +252,6 @@ uint32_t ocf_queue_pending_io(ocf_queue_t q) return env_atomic_read(&q->io_no); } -ocf_cache_t ocf_queue_get_cache(ocf_queue_t q) -{ - OCF_CHECK_NULL(q); - return q->cache; -} - void ocf_queue_push_req(struct ocf_request *req, uint flags) { ocf_cache_t cache = req->cache; diff --git a/src/ocf_request.c b/src/ocf_request.c 
index 682a3512..a0bd2456 100644 --- a/src/ocf_request.c +++ b/src/ocf_request.c @@ -81,13 +81,14 @@ void ocf_req_allocator_deinit(struct ocf_ctx *ocf_ctx) ocf_ctx->resources.req = NULL; } -static inline void ocf_req_init(struct ocf_request *req, ocf_queue_t queue, - ocf_core_t core, uint64_t addr, uint32_t bytes, int rw) +static inline void ocf_req_init(struct ocf_request *req, ocf_cache_t cache, + ocf_queue_t queue, ocf_core_t core, + uint64_t addr, uint32_t bytes, int rw) { req->io_queue = queue; req->core = core; - req->cache = queue->cache; + req->cache = cache; env_atomic_set(&req->ref_count, 1); @@ -96,7 +97,7 @@ static inline void ocf_req_init(struct ocf_request *req, ocf_queue_t queue, req->rw = rw; } -struct ocf_request *ocf_req_new_mngt(ocf_queue_t queue) +struct ocf_request *ocf_req_new_mngt(ocf_cache_t cache, ocf_queue_t queue) { struct ocf_request *req; @@ -106,23 +107,23 @@ struct ocf_request *ocf_req_new_mngt(ocf_queue_t queue) ocf_queue_get(queue); - ocf_req_init(req, queue, NULL, 0, 0, 0); + ocf_req_init(req, cache, queue, NULL, 0, 0, 0); req->is_mngt = true; return req; } -struct ocf_request *ocf_req_new_cleaner(ocf_queue_t queue, uint32_t count) +struct ocf_request *ocf_req_new_cleaner(ocf_cache_t cache, ocf_queue_t queue, + uint32_t count) { - ocf_cache_t cache = queue->cache; struct ocf_request *req; bool map_allocated = true, is_mngt = false; if (!ocf_refcnt_inc(&cache->refcnt.metadata)) return NULL; - if (unlikely(queue == cache->mngt_queue)) { + if (unlikely(ocf_queue_is_mngt(queue))) { req = env_zalloc(sizeof(*req) + ocf_req_sizeof_map(count) + ocf_req_sizeof_alock_status(count), ENV_MEM_NORMAL); @@ -143,7 +144,7 @@ struct ocf_request *ocf_req_new_cleaner(ocf_queue_t queue, uint32_t count) ocf_queue_get(queue); - ocf_req_init(req, queue, NULL, 0, 0, OCF_READ); + ocf_req_init(req, cache, queue, NULL, 0, 0, OCF_READ); if (map_allocated) { req->map = req->__map; @@ -167,11 +168,11 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, 
ocf_core_t core, uint64_t addr, uint32_t bytes, int rw) { uint64_t core_line_first, core_line_last, core_line_count; - ocf_cache_t cache = queue->cache; + ocf_cache_t cache = ocf_core_get_cache(core); struct ocf_request *req; bool map_allocated = true; - ENV_BUG_ON(queue == cache->mngt_queue); + ENV_BUG_ON(ocf_queue_is_mngt(queue)); if (likely(bytes)) { core_line_first = ocf_bytes_2_lines(cache, addr); @@ -204,7 +205,7 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core, ocf_queue_get(queue); - ocf_req_init(req, queue, core, addr, bytes, rw); + ocf_req_init(req, cache, queue, core, addr, bytes, rw); req->d2c = !ocf_refcnt_inc(&cache->refcnt.metadata); @@ -223,6 +224,55 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core, return req; } +struct ocf_request *ocf_req_new_cache(ocf_cache_t cache, ocf_queue_t queue, + uint64_t addr, uint32_t bytes, int rw) +{ + uint64_t core_line_first, core_line_last, core_line_count; + struct ocf_request *req; + bool map_allocated = true; + + ENV_BUG_ON(ocf_queue_is_mngt(queue)); + + if (!ocf_refcnt_inc(&cache->refcnt.metadata)) + return NULL; + + ocf_queue_get(queue); + + if (likely(bytes)) { + core_line_first = ocf_bytes_2_lines(cache, addr); + core_line_last = ocf_bytes_2_lines(cache, addr + bytes - 1); + core_line_count = core_line_last - core_line_first + 1; + } else { + core_line_count = 1; + } + + req = env_mpool_new(cache->owner->resources.req, core_line_count); + if (!req) { + map_allocated = false; + req = env_mpool_new(cache->owner->resources.req, 1); + } + + if (unlikely(!req)) { + ocf_refcnt_dec(&cache->refcnt.metadata); + ocf_queue_put(queue); + return NULL; + } + + if (map_allocated) { + req->map = req->__map; + req->alock_status = (uint8_t *)&req->__map[core_line_count]; + req->alloc_core_line_count = core_line_count; + } else { + req->alloc_core_line_count = 1; + } + + ocf_req_init(req, cache, queue, NULL, addr, bytes, rw); + + req->lock_idx = 
ocf_metadata_concurrency_next_idx(queue); + + return req; +} + int ocf_req_alloc_map(struct ocf_request *req) { uint32_t lines = req->core_line_count; diff --git a/src/ocf_request.h b/src/ocf_request.h index ae3d87dc..2872a8b2 100644 --- a/src/ocf_request.h +++ b/src/ocf_request.h @@ -288,7 +288,7 @@ void ocf_req_allocator_deinit(struct ocf_ctx *ocf_ctx); * * @return new OCF request */ -struct ocf_request *ocf_req_new_mngt(ocf_queue_t queue); +struct ocf_request *ocf_req_new_mngt(ocf_cache_t cache, ocf_queue_t queue); /** * @brief Allocate new OCF request for cleaner @@ -298,7 +298,8 @@ struct ocf_request *ocf_req_new_mngt(ocf_queue_t queue); * * @return new OCF request */ -struct ocf_request *ocf_req_new_cleaner(ocf_queue_t queue, uint32_t count); +struct ocf_request *ocf_req_new_cleaner(ocf_cache_t cache, ocf_queue_t queue, + uint32_t count); /** * @brief Allocate new OCF request @@ -314,6 +315,20 @@ struct ocf_request *ocf_req_new_cleaner(ocf_queue_t queue, uint32_t count); struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core, uint64_t addr, uint32_t bytes, int rw); +/** + * @brief Allocate new OCF request for cache IO + * + * @param cache - OCF cache instance + * @param queue - I/O queue handle + * @param addr - LBA of request + * @param bytes - number of bytes of request + * @param rw - Read or Write + * + * @return new OCF request + */ +struct ocf_request *ocf_req_new_cache(ocf_cache_t cache, ocf_queue_t queue, + uint64_t addr, uint32_t bytes, int rw); + /** * @brief Allocate OCF request map * diff --git a/src/utils/utils_cleaner.c b/src/utils/utils_cleaner.c index 2d3240dc..fc34def2 100644 --- a/src/utils/utils_cleaner.c +++ b/src/utils/utils_cleaner.c @@ -40,9 +40,10 @@ static struct ocf_request *_ocf_cleaner_alloc_req(struct ocf_cache *cache, uint32_t count, const struct ocf_cleaner_attribs *attribs) { - struct ocf_request *req = ocf_req_new_cleaner(attribs->io_queue, count); + struct ocf_request *req; int ret; + req = 
ocf_req_new_cleaner(cache, attribs->io_queue, count); if (!req) return NULL; diff --git a/src/utils/utils_parallelize.c b/src/utils/utils_parallelize.c index 53db392e..85eba9e8 100644 --- a/src/utils/utils_parallelize.c +++ b/src/utils/utils_parallelize.c @@ -105,7 +105,7 @@ int ocf_parallelize_create(ocf_parallelize_t *parallelize, } else { queue = cache->mngt_queue; } - tmp_parallelize->reqs[i] = ocf_req_new_mngt(queue); + tmp_parallelize->reqs[i] = ocf_req_new_mngt(cache, queue); if (!tmp_parallelize->reqs[i]) { result = -OCF_ERR_NO_MEM; goto err_reqs; diff --git a/src/utils/utils_pipeline.c b/src/utils/utils_pipeline.c index ee0698e8..8744af7b 100644 --- a/src/utils/utils_pipeline.c +++ b/src/utils/utils_pipeline.c @@ -87,7 +87,7 @@ int ocf_pipeline_create(ocf_pipeline_t *pipeline, ocf_cache_t cache, tmp_pipeline->priv = (void *)priv; } - req = ocf_req_new_mngt(cache->mngt_queue); + req = ocf_req_new_mngt(cache, cache->mngt_queue); if (!req) { env_vfree(tmp_pipeline); return -OCF_ERR_NO_MEM; From dc58eeae9bbd1455288c7994cc7aa7b4aa321646 Mon Sep 17 00:00:00 2001 From: Robert Baldyga Date: Mon, 25 Sep 2023 22:38:01 +0200 Subject: [PATCH 3/3] Introduce d2c request This avoids unnecessary map allocation and initialization of unused fields of request structure. 
It also allows to track their number separately from the regular requests Signed-off-by: Robert Baldyga Signed-off-by: Michal Mielewczyk --- src/mngt/ocf_mngt_cache.c | 2 ++ src/ocf_cache_priv.h | 2 ++ src/ocf_request.c | 44 ++++++++++++++++++++++++++++++++------- 3 files changed, 41 insertions(+), 7 deletions(-) diff --git a/src/mngt/ocf_mngt_cache.c b/src/mngt/ocf_mngt_cache.c index 3b2dc3f7..a2195ac4 100644 --- a/src/mngt/ocf_mngt_cache.c +++ b/src/mngt/ocf_mngt_cache.c @@ -857,6 +857,8 @@ static int _ocf_mngt_init_new_cache(struct ocf_cache_mngt_init_params *params) /* start with freezed metadata ref counter to indicate detached device*/ ocf_refcnt_freeze(&cache->refcnt.metadata); + ocf_refcnt_init(&cache->refcnt.d2c); + env_atomic_set(&(cache->last_access_ms), env_ticks_to_msecs(env_get_tick_count())); diff --git a/src/ocf_cache_priv.h b/src/ocf_cache_priv.h index 0e9b4d10..9a8e02d3 100644 --- a/src/ocf_cache_priv.h +++ b/src/ocf_cache_priv.h @@ -85,6 +85,8 @@ struct ocf_cache { /* # of requests accessing attached metadata, excluding * management reqs */ struct ocf_refcnt metadata __attribute__((aligned(64))); /* # of requests in d2c mode */ + struct ocf_refcnt d2c; } refcnt; struct { diff --git a/src/ocf_request.c b/src/ocf_request.c index a0bd2456..71ba2da8 100644 --- a/src/ocf_request.c +++ b/src/ocf_request.c @@ -164,6 +164,22 @@ struct ocf_request *ocf_req_new_cleaner(ocf_cache_t cache, ocf_queue_t queue, return req; } +static inline struct ocf_request *ocf_req_new_d2c(ocf_queue_t queue, + ocf_core_t core, uint64_t addr, uint32_t bytes, int rw) +{ + ocf_cache_t cache = ocf_core_get_cache(core); + struct ocf_request *req; + + req = env_mpool_new(cache->owner->resources.req, 1); + if (unlikely(!req)) + return NULL; + + ocf_req_init(req, cache, queue, core, addr, bytes, rw); + + req->d2c = true; + return req; +} + struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core, uint64_t addr, uint32_t bytes, int rw) { @@ -174,6 +190,19 @@ struct
ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core, ENV_BUG_ON(ocf_queue_is_mngt(queue)); + ocf_queue_get(queue); + + if (!ocf_refcnt_inc(&cache->refcnt.metadata)) { + if (!ocf_refcnt_inc(&cache->refcnt.d2c)) + ENV_BUG(); + req = ocf_req_new_d2c(queue, core, addr, bytes, rw); + if (unlikely(!req)) { + ocf_queue_put(queue); + return NULL; + } + return req; + } + if (likely(bytes)) { core_line_first = ocf_bytes_2_lines(cache, addr); core_line_last = ocf_bytes_2_lines(cache, addr + bytes - 1); @@ -190,8 +219,11 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core, req = env_mpool_new(cache->owner->resources.req, 1); } - if (unlikely(!req)) + if (unlikely(!req)) { + ocf_refcnt_dec(&cache->refcnt.metadata); + ocf_queue_put(queue); return NULL; + } if (map_allocated) { req->map = req->__map; @@ -203,11 +235,7 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core, OCF_DEBUG_TRACE(cache); - ocf_queue_get(queue); - - ocf_req_init(req, cache, queue, core, addr, bytes, rw); - - req->d2c = !ocf_refcnt_inc(&cache->refcnt.metadata); + ocf_req_init(req, cache, queue, NULL, addr, bytes, rw); req->core_line_first = core_line_first; req->core_line_last = core_line_last; @@ -358,7 +386,9 @@ void ocf_req_put(struct ocf_request *req) OCF_DEBUG_TRACE(req->cache); - if ((!req->d2c && !req->is_mngt) || req->cleaner) + if (req->d2c) + ocf_refcnt_dec(&req->cache->refcnt.d2c); + else if (!req->is_mngt || req->cleaner) ocf_refcnt_dec(&req->cache->refcnt.metadata); if (unlikely(req->is_mngt)) {