From 594c480b5448dd3508ffb0ab98a44d2042bcdf4e Mon Sep 17 00:00:00 2001 From: jazzvaz Date: Thu, 28 Dec 2017 20:50:32 +0500 Subject: [PATCH] Replace ttapi with thread pool --- src/Layers/xrRender/ParticleEffect.cpp | 11 +- src/Layers/xrRenderPC_R1/FStaticRender.cpp | 3 - src/Layers/xrRenderPC_R1/LightProjector.cpp | 4 - src/Layers/xrRenderPC_R1/LightShadows.cpp | 5 +- src/xrCore/Math/MathUtil.cpp | 9 +- src/xrCore/Math/Skin4W_MT.cpp | 11 +- src/xrCore/Threading/ThreadPool.cpp | 71 +++++ src/xrCore/Threading/ThreadPool.hpp | 70 +++++ src/xrCore/Threading/ttapi.cpp | 255 ------------------ src/xrCore/Threading/ttapi.h | 22 -- src/xrCore/xrCore.cpp | 5 +- src/xrCore/xrCore.vcxproj | 4 +- src/xrCore/xrCore.vcxproj.filters | 12 +- src/xrEngine/IGame_Level.cpp | 12 +- .../particle_actions_collection.cpp | 10 +- 15 files changed, 180 insertions(+), 324 deletions(-) create mode 100644 src/xrCore/Threading/ThreadPool.cpp create mode 100644 src/xrCore/Threading/ThreadPool.hpp delete mode 100644 src/xrCore/Threading/ttapi.cpp delete mode 100644 src/xrCore/Threading/ttapi.h diff --git a/src/Layers/xrRender/ParticleEffect.cpp b/src/Layers/xrRender/ParticleEffect.cpp index 7fc2211b839..f36f3f5a986 100644 --- a/src/Layers/xrRender/ParticleEffect.cpp +++ b/src/Layers/xrRender/ParticleEffect.cpp @@ -3,7 +3,7 @@ #include "ParticleEffect.h" #ifndef _EDITOR #include -#include "xrCore/Threading/ttapi.h" +#include "xrCore/Threading/ThreadPool.hpp" #endif using namespace PAPI; @@ -434,7 +434,7 @@ __forceinline void magnitude_sse(Fvector& vec, float& res) _mm_store_ss((float*)&res, tv); } -void ParticleRenderStream(LPVOID lpvParams) +void ParticleRenderStream(void* lpvParams) { #ifdef _GPA_ENABLED TAL_SCOPED_TASK_NAMED("ParticleRenderStream()"); @@ -587,8 +587,9 @@ void CParticleEffect::Render(float) FVF::LIT* pv_start = (FVF::LIT*)RCache.Vertex.Lock(p_cnt * 4 * 4, geom->vb_stride, dwOffset); FVF::LIT* pv = pv_start; - u32 nWorkers = ttapi_GetWorkerCount(); + u32 nWorkers = ttapi.threads.size(); + // XXX: Xottab_DUTY: Review this if (p_cnt < nWorkers * 64) nWorkers = 1; @@ -610,10 +611,10 @@ void CParticleEffect::Render(float) prsParams[i].p_to = (i == (nWorkers - 1)) ? 
p_cnt : (prsParams[i].p_from + nStep); prsParams[i].particles = particles; prsParams[i].pPE = this; - ttapi_AddWorker(ParticleRenderStream, (LPVOID)&prsParams[i]); + ttapi.threads[i]->addJob([=] { ParticleRenderStream((void*)&prsParams[i]); }); } - ttapi_Run(); + ttapi.wait(); dwCount = p_cnt << 2; diff --git a/src/Layers/xrRenderPC_R1/FStaticRender.cpp b/src/Layers/xrRenderPC_R1/FStaticRender.cpp index a79dce7f8e4..320b410fb44 100644 --- a/src/Layers/xrRenderPC_R1/FStaticRender.cpp +++ b/src/Layers/xrRenderPC_R1/FStaticRender.cpp @@ -15,9 +15,6 @@ #include "Layers/xrRender/lighttrack.h" #include "Layers/xrRender/dxWallMarkArray.h" #include "Layers/xrRender/dxUIShader.h" -#ifndef _EDITOR -#include "xrCore/Threading/ttapi.h" -#endif using namespace R_dsgraph; diff --git a/src/Layers/xrRenderPC_R1/LightProjector.cpp b/src/Layers/xrRenderPC_R1/LightProjector.cpp index 1e8b0c67a32..2cc7345e03c 100644 --- a/src/Layers/xrRenderPC_R1/LightProjector.cpp +++ b/src/Layers/xrRenderPC_R1/LightProjector.cpp @@ -8,10 +8,6 @@ #include "xrEngine/xr_object.h" #include "Layers/xrRender/LightTrack.h" -#ifndef _EDITOR -#include "xrCore/Threading/ttapi.h" -#endif - // tir2.xrdemo -> 45.2 // tir2.xrdemo -> 61.8 diff --git a/src/Layers/xrRenderPC_R1/LightShadows.cpp b/src/Layers/xrRenderPC_R1/LightShadows.cpp index bfcfe32b366..6fd6a3674a9 100644 --- a/src/Layers/xrRenderPC_R1/LightShadows.cpp +++ b/src/Layers/xrRenderPC_R1/LightShadows.cpp @@ -4,9 +4,6 @@ #include "xrEngine/xr_object.h" #include "Layers/xrRender/FBasicVisual.h" #include "xrEngine/CustomHUD.h" -#ifndef _EDITOR -#include "xrCore/Threading/ttapi.h" -#endif #include "xrCore/Math/MathUtil.hpp" using namespace XRay::Math; @@ -353,6 +350,7 @@ IC bool cache_search(const CLightShadows::cache_item& A, const CLightShadows::ca return false; // eq } +// XXX: use PLC_energy from xrCore IC float PLC_energy(Fvector& P, Fvector& N, light* L, float E) { Fvector Ldir; @@ -386,6 +384,7 @@ IC float PLC_energy(Fvector& P, Fvector& N, light* L, float E) } } +// XXX: use PLC_calc from xrCore (maybe) IC int PLC_calc(Fvector& P, Fvector& N, light* L, float energy, Fvector& O) { float E = PLC_energy(P, N, L, energy); diff --git a/src/xrCore/Math/MathUtil.cpp b/src/xrCore/Math/MathUtil.cpp index e3e8b3c4883..f4ffd5ceb70 100644 --- a/src/xrCore/Math/MathUtil.cpp +++ b/src/xrCore/Math/MathUtil.cpp @@ -1,6 +1,6 @@ #include "stdafx.h" #include "MathUtil.hpp" -#include "Threading/ttapi.h" +#include "Threading/ThreadPool.hpp" #ifdef _EDITOR #include "SkeletonX.h" @@ -55,12 +55,15 @@ void Initialize() PLCCalc = PLCCalc_SSE; //PLCCalc = PLCCalc_CPP; #endif + // XXX: use PLC_energy and iCeil too + // SSE implementations of this functions is not used. 
+    // Found duplicate implementation in src\Layers\xrRenderPC_R1\LightShadows.cpp
+    // Search for other duplicates
 
-    if (ttapi_GetWorkerCount() > 1)
+    if (ttapi.threads.size() > 1)
         Skin4W = Skin4W_MT;
     initialized = true;
 }
-
 } // namespace Math
 } // namespace XRay
diff --git a/src/xrCore/Math/Skin4W_MT.cpp b/src/xrCore/Math/Skin4W_MT.cpp
index be9cbfbd144..f47f4ed74b2 100644
--- a/src/xrCore/Math/Skin4W_MT.cpp
+++ b/src/xrCore/Math/Skin4W_MT.cpp
@@ -1,6 +1,6 @@
 #include "stdafx.h"
 #include "Skin4W_MT.hpp"
-#include "Threading/ttapi.h"
+#include "Threading/ThreadPool.hpp"
 #ifdef _EDITOR
 #include "SkeletonX.h"
 #include "SkeletonCustom.h"
@@ -28,6 +28,7 @@ void Skin4W_Stream(void* params)
 #ifdef _GPA_ENABLED
     TAL_SCOPED_TASK_NAMED("Skin4W_Stream()");
 #endif
+
     auto& sp = *(SkinParams*)params;
     auto dst = (vertRender*)sp.Dest;
     auto src = (vertBoned4W*)sp.Src;
@@ -40,7 +41,7 @@ void Skin4W_MT(vertRender* dst, vertBoned4W* src, u32 vCount, CBoneInstance* bon
 #ifdef _GPA_ENABLED
     TAL_SCOPED_TASK_NAMED("Skin4W_MT()");
 #endif
-    u32 workerCount = ttapi_GetWorkerCount();
+    u32 workerCount = ttapi.threads.size();
     if (vCount < workerCount * 64)
     {
         Skin4W_MTs(dst, src, vCount, bones);
@@ -57,10 +58,10 @@ void Skin4W_MT(vertRender* dst, vertBoned4W* src, u32 vCount, CBoneInstance* bon
         params[i].Src = src + i * nStep;
         params[i].Count = i == (workerCount - 1) ? nLast : nStep;
         params[i].Data = bones;
-        ttapi_AddWorker(Skin4W_Stream, &params[i]);
+        ttapi.threads[i]->addJob([=] { Skin4W_Stream(&params[i]); });
     }
 
-    ttapi_Run();
+    ttapi.wait();
 }
-} // namespace Util3D
+} // namespace Math
 } // namespace XRay
diff --git a/src/xrCore/Threading/ThreadPool.cpp b/src/xrCore/Threading/ThreadPool.cpp
new file mode 100644
index 00000000000..7bf31d57646
--- /dev/null
+++ b/src/xrCore/Threading/ThreadPool.cpp
@@ -0,0 +1,71 @@
+#include "stdafx.h"
+#include "Threading/ThreadPool.hpp"
+#ifdef _GPA_ENABLED
+#include <tal.h>
+#endif
+
+/*
+* Basic C++11 based thread pool with per-thread job queues
+*
+* Copyright (C) 2016 by Sascha Willems - www.saschawillems.de
+*
+* This code is licensed under the MIT license (MIT) (http://opensource.org/licenses/MIT)
+*/
+
+Thread::Thread()
+{
+    worker = std::thread(&Thread::queueLoop, this);
+}
+
+Thread::~Thread()
+{
+    if (worker.joinable())
+    {
+        wait();
+        queueMutex.lock();
+        destroying = true;
+        condition.notify_one();
+        queueMutex.unlock();
+        worker.join();
+    }
+}
+
+void Thread::addJob(std::function<void()> function)
+{
+    std::lock_guard<std::mutex> lock(queueMutex);
+    jobQueue.push(std::move(function));
+    condition.notify_one();
+}
+
+void Thread::wait()
+{
+    std::unique_lock<std::mutex> lock(queueMutex);
+    condition.wait(lock, [this]() { return jobQueue.empty(); });
+}
+
+void Thread::queueLoop()
+{
+    while (true)
+    {
+        std::function<void()> job;
+        {
+            std::unique_lock<std::mutex> lock(queueMutex);
+            condition.wait(lock, [this] { return !jobQueue.empty() || destroying; });
+            if (destroying)
+            {
+                break;
+            }
+            job = jobQueue.front();
+        }
+
+        job();
+
+        {
+            std::lock_guard<std::mutex> lock(queueMutex);
+            jobQueue.pop();
+            condition.notify_one();
+        }
+    }
+}
+
+XRCORE_API ThreadPool ttapi;
diff --git a/src/xrCore/Threading/ThreadPool.hpp b/src/xrCore/Threading/ThreadPool.hpp
new file mode 100644
index 00000000000..889d1b78b78
--- /dev/null
+++ b/src/xrCore/Threading/ThreadPool.hpp
@@ -0,0 +1,70 @@
+#pragma once
+#include "xrCore/xrCore.h"
+
+/*
+* Basic C++11 based thread pool with per-thread job queues
+*
+* Copyright (C) 2016 by Sascha Willems - www.saschawillems.de
+*
+* This code is licensed under the MIT license (MIT) (http://opensource.org/licenses/MIT)
+*/
+
+#include <thread>
+#include <queue>
+#include <mutex>
+#include <condition_variable>
+#include <functional>
+#include <vector>
+#include <memory>
+
+class XRCORE_API Thread
+{
+    bool destroying = false;
+    std::thread worker;
+    std::queue<std::function<void()>> jobQueue;
+    std::mutex queueMutex;
+    std::condition_variable condition;
+
+    // Loop through all remaining jobs
+    void queueLoop();
+
+public:
+    Thread();
+    ~Thread();
+
+    // Add a new job to the thread's queue
+    void addJob(std::function<void()> function);
+
+    // Wait until all work items have been finished
+    void wait();
+};
+
+class ThreadPool
+{
+public:
+    std::vector<std::unique_ptr<Thread>> threads;
+
+    void initialize()
+    {
+        const int num_threads = std::thread::hardware_concurrency();
+        R_ASSERT(num_threads > 0);
+        setThreadCount(num_threads);
+    }
+
+    // Sets the number of threads to be allocated in this pool
+    void setThreadCount(const uint32_t count)
+    {
+        threads.clear();
+        for (auto i = 0; i < count; i++)
+            threads.push_back(std::make_unique<Thread>());
+    }
+
+    // Wait until all threads have finished their work items
+    void wait()
+    {
+        for (auto &thread : threads)
+            thread->wait();
+    }
+};
+
+extern XRCORE_API ThreadPool ttapi;
diff --git a/src/xrCore/Threading/ttapi.cpp b/src/xrCore/Threading/ttapi.cpp
deleted file mode 100644
index d90f42cb3c0..00000000000
--- a/src/xrCore/Threading/ttapi.cpp
+++ /dev/null
@@ -1,255 +0,0 @@
-#include "stdafx.h"
-#include "ttapi.h"
-#include
-#ifdef _GPA_ENABLED
-#include
-#endif
-
-typedef struct TTAPI_WORKER_PARAMS
-{
-    volatile LONG vlFlag;
-    TTAPIWorkerFunc lpWorkerFunc;
-    LPVOID lpvWorkerFuncParams;
-    DWORD dwPadding[13];
-} * PTTAPI_WORKER_PARAMS;
-
-typedef PTTAPI_WORKER_PARAMS LPTTAPI_WORKER_PARAMS;
-
-static LPHANDLE ttapi_threads_handles = NULL;
-static bool ttapi_initialized = false;
-static DWORD ttapi_worker_count = 0;
-static DWORD ttapi_thread_count = 0;
-static DWORD ttapi_assigned_workers = 0;
-static LPTTAPI_WORKER_PARAMS ttapi_worker_params = NULL;
-static DWORD ttapi_dwFastIter = 0;
-static DWORD ttapi_dwSlowIter = 0;
-
-struct
-{
-    volatile LONG size;
-    DWORD dwPadding[15];
-} ttapi_queue_size;
-
-static DWORD WINAPI ttapiThreadProc(void* lpParameter)
-{
-    LPTTAPI_WORKER_PARAMS pParams = (LPTTAPI_WORKER_PARAMS)lpParameter;
-    DWORD i, dwFastIter = ttapi_dwFastIter, dwSlowIter = ttapi_dwSlowIter;
-
-    while (TRUE)
-    {
-        // Wait
-
-        // Fast
-        for (i = 0; i < dwFastIter; ++i)
-        {
-            if (pParams->vlFlag == 0)
-            {
-                // Msg( "0x%8.8X Fast %u" , dwId , i );
-                goto process;
-            }
-            _mm_pause();
-        }
-
-        // Moderate
-        for (i = 0; i < dwSlowIter; ++i)
-        {
-            if (pParams->vlFlag == 0)
-            {
-                // Msg( "0x%8.8X Moderate %u" , dwId , i );
-                goto process;
-            }
-            SwitchToThread();
-        }
-
-        // Slow
-        while (pParams->vlFlag)
-            Sleep(100);
-    process:
-        pParams->vlFlag = 1;
-
-        if (pParams->lpWorkerFunc)
-            pParams->lpWorkerFunc(pParams->lpvWorkerFuncParams);
-        else
-            break;
-
-        _InterlockedDecrement(&ttapi_queue_size.size);
-
-    } // while
-
-    return 0;
-}
-
-typedef struct tagTHREADNAME_INFO
-{
-    DWORD dwType;
-    LPCSTR szName;
-    DWORD dwThreadID;
-    DWORD dwFlags;
-} THREADNAME_INFO;
-
-void SetThreadName(DWORD dwThreadID, LPCSTR szThreadName)
-{
-    THREADNAME_INFO info;
-    {
-        info.dwType = 0x1000;
-        info.szName = szThreadName;
-        info.dwThreadID = dwThreadID;
-        info.dwFlags = 0;
-    }
-    __try
-    {
-        RaiseException(0x406D1388, 0, sizeof(info) / sizeof(DWORD), (ULONG_PTR*)&info);
-    }
-    __except (EXCEPTION_CONTINUE_EXECUTION)
-    {
-    }
-}
-
-int ttapi_Init(const processor_info& pi)
-{
-    if (ttapi_initialized)
-        return ttapi_worker_count;
-
-    // System Info
-    ttapi_worker_count = pi.n_cores;
-    SetPriorityClass(GetCurrentProcess(),
REALTIME_PRIORITY_CLASS); - DWORD dwNumIter; - volatile DWORD dwDummy = 1; - LARGE_INTEGER liFrequency, liStart, liEnd; - QueryPerformanceFrequency(&liFrequency); - // Get fast spin-loop timings - dwNumIter = 100000000; - QueryPerformanceCounter(&liStart); - for (DWORD i = 0; i < dwNumIter; ++i) - { - if (!dwDummy) - goto process1; - _mm_pause(); - } -process1: - QueryPerformanceCounter(&liEnd); - // We want 1/25 (40ms) fast spin-loop - ttapi_dwFastIter = DWORD((dwNumIter * liFrequency.QuadPart) / ((liEnd.QuadPart - liStart.QuadPart) * 25)); - // Get slow spin-loop timings - dwNumIter = 10000000; - QueryPerformanceCounter(&liStart); - for (DWORD i = 0; i < dwNumIter; ++i) - { - if (!dwDummy) - goto process2; - SwitchToThread(); - } -process2: - QueryPerformanceCounter(&liEnd); - // We want 1/2 (500ms) slow spin-loop - ttapi_dwSlowIter = DWORD((dwNumIter * liFrequency.QuadPart) / ((liEnd.QuadPart - liStart.QuadPart) * 2)); - SetPriorityClass(GetCurrentProcess(), NORMAL_PRIORITY_CLASS); - // Check for override from command line - char szSearchFor[] = "-max-threads"; - char* pszTemp = strstr(GetCommandLine(), szSearchFor); - DWORD dwOverride = 0; - if (pszTemp && sscanf_s(pszTemp + strlen(szSearchFor), "%u", &dwOverride)) - { - if (dwOverride >= 1 && dwOverride < ttapi_worker_count) - ttapi_worker_count = dwOverride; - } - // Number of helper threads - ttapi_thread_count = ttapi_worker_count - 1; - - // Creating control structures - if ((ttapi_threads_handles = (LPHANDLE)malloc(sizeof(HANDLE) * ttapi_thread_count)) == NULL) - return 0; - if ((ttapi_worker_params = (PTTAPI_WORKER_PARAMS)malloc(sizeof(TTAPI_WORKER_PARAMS) * ttapi_worker_count)) == NULL) - return 0; - // Clearing params - for (DWORD i = 0; i < ttapi_worker_count; i++) - ttapi_worker_params[i] = {}; - char szThreadName[64]; - DWORD dwThreadId = 0; - DWORD dwAffinitiMask = pi.affinity_mask; - DWORD dwCurrentMask = 0x01; - // Setting affinity - while (!(dwAffinitiMask & dwCurrentMask)) - dwCurrentMask <<= 1; - SetThreadAffinityMask(GetCurrentThread(), dwCurrentMask); - // Creating threads - for (DWORD i = 0; i < ttapi_thread_count; i++) - { - // Initializing "enter" "critical section" - ttapi_worker_params[i].vlFlag = 1; - ttapi_threads_handles[i] = CreateThread(NULL, 0, &ttapiThreadProc, &ttapi_worker_params[i], 0, &dwThreadId); - if (!ttapi_threads_handles[i]) - return 0; - // Setting affinity - do - { - dwCurrentMask <<= 1; - } while (!(dwAffinitiMask & dwCurrentMask)); - SetThreadAffinityMask(ttapi_threads_handles[i], dwCurrentMask); - // Setting thread name - sprintf_s(szThreadName, "Helper Thread #%u", i); - SetThreadName(dwThreadId, szThreadName); - } - ttapi_initialized = true; - return ttapi_worker_count; -} - -int ttapi_GetWorkerCount() { return ttapi_worker_count; } -// We do not check for overflow here to be faster -// Assume that caller is smart enough to use ttapi_GetWorkersCount() to get number of available slots -void ttapi_AddWorker(TTAPIWorkerFunc lpWorkerFunc, void* lpvWorkerFuncParams) -{ - // Assigning parameters - ttapi_worker_params[ttapi_assigned_workers].lpWorkerFunc = lpWorkerFunc; - ttapi_worker_params[ttapi_assigned_workers].lpvWorkerFuncParams = lpvWorkerFuncParams; - ttapi_assigned_workers++; -} - -void ttapi_Run() -{ - DWORD workerCount = (ttapi_assigned_workers - 1); - if (workerCount) - { - // Setting queue size - ttapi_queue_size.size = workerCount; - // Starting all workers except the last - for (DWORD i = 0; i < workerCount; ++i) - _InterlockedExchange(&ttapi_worker_params[i].vlFlag, 0); - // 
Running last worker in current thread - ttapi_worker_params[workerCount].lpWorkerFunc(ttapi_worker_params[workerCount].lpvWorkerFuncParams); - // Waiting task queue to become empty - // Start = __rdtsc(); - while (ttapi_queue_size.size) - _mm_pause(); - // Stop = __rdtsc(); - // Msg( "Wait: %u ticks" , Stop - Start ); - } - else - // Running the only worker in current thread - ttapi_worker_params[workerCount].lpWorkerFunc(ttapi_worker_params[workerCount].lpvWorkerFuncParams); - // Cleaning active workers count - ttapi_assigned_workers = 0; -} - -void ttapi_Done() -{ - if (!ttapi_initialized) - return; - // Asking helper threads to terminate - for (DWORD i = 0; i < ttapi_thread_count; i++) - { - ttapi_worker_params[i].lpWorkerFunc = NULL; - _InterlockedExchange(&ttapi_worker_params[i].vlFlag, 0); - } - // Waiting threads for completion - WaitForMultipleObjects(ttapi_thread_count, ttapi_threads_handles, TRUE, INFINITE); - // Freeing resources - free(ttapi_threads_handles); - ttapi_threads_handles = nullptr; - free(ttapi_worker_params); - ttapi_worker_params = nullptr; - ttapi_worker_count = 0; - ttapi_thread_count = 0; - ttapi_assigned_workers = 0; - ttapi_initialized = false; -} diff --git a/src/xrCore/Threading/ttapi.h b/src/xrCore/Threading/ttapi.h deleted file mode 100644 index 7afb5b4db25..00000000000 --- a/src/xrCore/Threading/ttapi.h +++ /dev/null @@ -1,22 +0,0 @@ -#pragma once -#include "xrCore/xrCore.h" -// Trivial (and dumb) Threads API - -using TTAPIWorkerFunc = void (*)(void* lpWorkerParameters); - -// Initializes subsystem -// Returns zero for error, and number of workers on success -int XRCORE_API ttapi_Init(const processor_info& pi); - -// Destroys subsystem -void XRCORE_API ttapi_Done(); - -// Return number of workers -int XRCORE_API ttapi_GetWorkerCount(); - -// Adds new task -// No more than TTAPI_HARDCODED_THREADS should be added -void XRCORE_API ttapi_AddWorker(TTAPIWorkerFunc workerFunc, void* workerFuncParams); - -// Runs and wait for all workers to complete job -void XRCORE_API ttapi_Run(); diff --git a/src/xrCore/xrCore.cpp b/src/xrCore/xrCore.cpp index 04283bb3510..721d3a804d3 100644 --- a/src/xrCore/xrCore.cpp +++ b/src/xrCore/xrCore.cpp @@ -6,7 +6,7 @@ #include #include #include "xrCore.h" -#include "Threading/ttapi.h" +#include "Threading/ThreadPool.hpp" #include "Math/MathUtil.hpp" #pragma comment(lib, "winmm.lib") @@ -76,7 +76,7 @@ void xrCore::Initialize(pcstr _ApplicationName, LogCallback cb, bool init_fs, pc Msg("%s %s build %d, %s\n", "xdOpenXRay", GetBuildConfiguration(), buildId, buildDate); _initialize_cpu(); R_ASSERT(CPU::ID.hasFeature(CpuFeature::Sse)); - ttapi_Init(CPU::ID); + ttapi.initialize(); XRay::Math::Initialize(); // xrDebug::Initialize (); @@ -132,7 +132,6 @@ void xrCore::_destroy() --init_counter; if (0 == init_counter) { - ttapi_Done(); FS._destroy(); EFS._destroy(); xr_delete(xr_FS); diff --git a/src/xrCore/xrCore.vcxproj b/src/xrCore/xrCore.vcxproj index 21ac471b8f6..251d50fc56e 100644 --- a/src/xrCore/xrCore.vcxproj +++ b/src/xrCore/xrCore.vcxproj @@ -235,7 +235,7 @@ - + @@ -346,7 +346,7 @@ - + diff --git a/src/xrCore/xrCore.vcxproj.filters b/src/xrCore/xrCore.vcxproj.filters index 742a5211bb2..38ff0e5056d 100644 --- a/src/xrCore/xrCore.vcxproj.filters +++ b/src/xrCore/xrCore.vcxproj.filters @@ -273,9 +273,6 @@ Media - - Threading - Math @@ -348,6 +345,9 @@ Memory\Memory manager\dlmalloc\wrapper + + Threading + @@ -605,9 +605,6 @@ Kernel - - Threading - Math @@ -743,6 +740,9 @@ Memory + + Threading + diff --git 
a/src/xrEngine/IGame_Level.cpp b/src/xrEngine/IGame_Level.cpp index dcd5dd0dce8..2c0f44e7276 100644 --- a/src/xrEngine/IGame_Level.cpp +++ b/src/xrEngine/IGame_Level.cpp @@ -135,15 +135,11 @@ bool IGame_Level::Load(u32 dwNum) return true; } -#ifndef _EDITOR -#include "xrCore/Threading/ttapi.h" -#endif - int psNET_DedicatedSleep = 5; void IGame_Level::OnRender() { #ifndef DEDICATED_SERVER -// if (_abs(Device.fTimeDelta)pFontSystem->SetSizeI(0.023f); -// pApp->pFontSystem->OnRender (); + // Font + // pApp->pFontSystem->SetSizeI(0.023f); + // pApp->pFontSystem->OnRender(); #endif } diff --git a/src/xrParticles/particle_actions_collection.cpp b/src/xrParticles/particle_actions_collection.cpp index 4ce023b2c74..09ca00b0827 100644 --- a/src/xrParticles/particle_actions_collection.cpp +++ b/src/xrParticles/particle_actions_collection.cpp @@ -1617,7 +1617,7 @@ extern void noise3Init(); #ifndef _EDITOR #include -#include "xrCore/Threading/ttapi.h" +#include "xrCore/Threading/ThreadPool.hpp" ICF __m128 _mm_load_fvector(const Fvector& v) { @@ -1753,8 +1753,9 @@ void PATurbulence::Execute(ParticleEffect* effect, const float dt, float& tm_max if (!p_cnt) return; - u32 nWorkers = ttapi_GetWorkerCount(); + u32 nWorkers = ttapi.threads.size(); + // XXX: Xottab_DUTY: Review this if (p_cnt < nWorkers * 64) nWorkers = 1; @@ -1781,11 +1782,10 @@ void PATurbulence::Execute(ParticleEffect* effect, const float dt, float& tm_max tesParams[i].frequency = frequency; tesParams[i].octaves = octaves; tesParams[i].magnitude = magnitude; - - ttapi_AddWorker(PATurbulenceExecuteStream, (LPVOID)&tesParams[i]); + ttapi.threads[i]->addJob([=] { PATurbulenceExecuteStream((void*)&tesParams[i]); }); } - ttapi_Run(); + ttapi.wait(); } #else
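
Notes on the new API (illustrative sketches, not part of the patch):

Every converted call site (CParticleEffect::Render, Skin4W_MT, PATurbulence::Execute) follows the same pattern: clamp the worker count when there is too little work, queue one job per worker thread with addJob, then block on ttapi.wait(). A minimal sketch of that dispatch pattern, assuming the ThreadPool.hpp added above; SplitAcrossWorkers and its parameters are hypothetical names, not code from this patch:

    #include <cstddef>
    #include "xrCore/Threading/ThreadPool.hpp" // global `ttapi` pool introduced by this patch

    // Hypothetical helper mirroring the converted call sites; assumes ttapi.initialize()
    // has already been called and itemCount > 0 (the real call sites return early otherwise).
    // `job` is invoked as job(from, to) for each worker's half-open range [from, to).
    template <typename Job>
    void SplitAcrossWorkers(size_t itemCount, const Job& job)
    {
        size_t nWorkers = ttapi.threads.size();
        if (itemCount < nWorkers * 64) // same "too little work for threading" check kept at each call site
            nWorkers = 1;

        const size_t nStep = itemCount / nWorkers;
        for (size_t i = 0; i < nWorkers; ++i)
        {
            const size_t from = i * nStep;
            const size_t to = (i == nWorkers - 1) ? itemCount : from + nStep;
            // Capture by value: the lambda runs on a pool thread after this iteration ends.
            ttapi.threads[i]->addJob([=] { job(from, to); });
        }
        ttapi.wait(); // barrier: returns once every per-thread queue has drained
    }

As at the real call sites, the per-range params array passed into the jobs must stay alive until ttapi.wait() returns, which it does because wait() is called before the enclosing scope ends.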
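ttapi.wait() works as a barrier because of the ordering in Thread::queueLoop(): a job is popped from jobQueue only after it has executed, so Thread::wait(), which waits for the queue to become empty, cannot return while a job is still running. A standalone sketch of that behaviour, assuming the Thread class above can be used directly outside xrCore:

    #include <atomic>
    #include <cassert>
    #include "xrCore/Threading/ThreadPool.hpp"

    int main()
    {
        std::atomic<int> done{0};
        Thread worker; // one worker thread with its own queue, as stored in ThreadPool::threads

        for (int i = 0; i < 100; ++i)
            worker.addJob([&done] { ++done; });

        // wait() blocks until jobQueue is empty; since queueLoop() pops a job only after
        // running it, an empty queue means the last job has already completed.
        worker.wait();
        assert(done == 100);
        return 0; // ~Thread() waits again, then joins the worker
    }

This is what makes the addJob/wait pairs a replacement for ttapi_AddWorker/ttapi_Run, with one behavioural difference: the calling thread now only waits, whereas the deleted ttapi_Run executed the last chunk itself.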
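One more difference worth flagging: the deleted ttapi_Init() honoured a "-max-threads" command-line override (and pinned and named its helper threads), while ThreadPool::initialize() always sizes the pool to std::thread::hardware_concurrency(). If the override is still wanted, it could be layered on top of setThreadCount(); a hedged sketch, where InitializeWorkerPool and its commandLine parameter are hypothetical and not part of this patch:

    #include <cstdio>
    #include <cstring>
    #include <thread>
    #include "xrCore/Threading/ThreadPool.hpp"

    // Hypothetical replacement for ttapi.initialize() that keeps the old "-max-threads" cap.
    void InitializeWorkerPool(const char* commandLine)
    {
        unsigned threadCount = std::thread::hardware_concurrency();

        // Same parsing the deleted ttapi_Init() performed on the command line.
        if (const char* arg = std::strstr(commandLine, "-max-threads"))
        {
            unsigned requested = 0;
            if (std::sscanf(arg + std::strlen("-max-threads"), "%u", &requested) == 1 &&
                requested >= 1 && requested < threadCount)
                threadCount = requested;
        }
        ttapi.setThreadCount(threadCount);
    }

xrCore::Initialize() could then call something like InitializeWorkerPool(GetCommandLine()) in place of ttapi.initialize(), mirroring where the deleted code read the command line.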