diff --git a/3dti_ResourceManager/BRIR/BRIRFactory.cpp b/3dti_ResourceManager/BRIR/BRIRFactory.cpp index 911064de..80445740 100644 --- a/3dti_ResourceManager/BRIR/BRIRFactory.cpp +++ b/3dti_ResourceManager/BRIR/BRIRFactory.cpp @@ -161,8 +161,7 @@ namespace BRIR { if (LoadBRIRTableFromSOFA(sofafile, environment)) { - environment->GetBRIR()->EndSetup(); - return true; + return environment->GetBRIR()->EndSetup(); } else { @@ -275,31 +274,39 @@ namespace BRIR double azimuth = pos[array2DIndex(i, 0, nMeasurements, dims[1])]; double elevation = pos[array2DIndex(i, 1, nMeasurements, dims[1])]; - while (elevation < 0) elevation += 360; // TODO: check who should do this - + while (elevation < 0) elevation += 360; // SET VIRTUAL SPEAKER if ((AnglesAreCloseInDegrees(azimuth, NORTH_AZIMUTH)) && (AnglesAreCloseInDegrees(elevation, 0.0f))) { - environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::NORTH, Common::T_ear::LEFT, std::move(leftBRIRChannel)); - environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::NORTH, Common::T_ear::RIGHT, std::move(rightBRIRChannel)); + if(!environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::NORTH, Common::T_ear::LEFT, std::move(leftBRIRChannel))){return false;} + if(!environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::NORTH, Common::T_ear::RIGHT, std::move(rightBRIRChannel))){return false;} } if ((AnglesAreCloseInDegrees(azimuth, SOUTH_AZIMUTH)) && (AnglesAreCloseInDegrees(elevation, 0.0f))) { - environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::SOUTH, Common::T_ear::LEFT, std::move(leftBRIRChannel)); - environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::SOUTH, Common::T_ear::RIGHT, std::move(rightBRIRChannel)); + if(!environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::SOUTH, Common::T_ear::LEFT, std::move(leftBRIRChannel))){return false;} + if(!environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::SOUTH, Common::T_ear::RIGHT, std::move(rightBRIRChannel))){return false;} } if ((AnglesAreCloseInDegrees(azimuth, WEST_AZIMUTH)) && (AnglesAreCloseInDegrees(elevation, 0.0f))) { - environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::WEST, Common::T_ear::LEFT, std::move(leftBRIRChannel)); - environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::WEST, Common::T_ear::RIGHT, std::move(rightBRIRChannel)); + if(!environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::WEST, Common::T_ear::LEFT, std::move(leftBRIRChannel))){return false;} + if(!environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::WEST, Common::T_ear::RIGHT, std::move(rightBRIRChannel))){return false;} } if ((AnglesAreCloseInDegrees(azimuth, EAST_AZIMUTH)) && (AnglesAreCloseInDegrees(elevation, 0.0f))) { - environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::EAST, Common::T_ear::LEFT, std::move(leftBRIRChannel)); - environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::EAST, Common::T_ear::RIGHT, std::move(rightBRIRChannel)); + if(!environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::EAST, Common::T_ear::LEFT, std::move(leftBRIRChannel))){return false;} + if(!environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::EAST, Common::T_ear::RIGHT, std::move(rightBRIRChannel))){return false;} + } + if (AnglesAreCloseInDegrees(elevation, 90.0f)) + { + if(!environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::ZENIT, Common::T_ear::LEFT, std::move(leftBRIRChannel))){return false;} + if(!environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::ZENIT, Common::T_ear::RIGHT, std::move(rightBRIRChannel))){return false;} + } + if (AnglesAreCloseInDegrees(elevation, 270.0f)) + { + 
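+					// Elevation was wrapped into [0, 360) above, so a speaker straight below the listener (-90 degrees) reaches this branch as 270.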
if(!environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::NADIR, Common::T_ear::LEFT, std::move(leftBRIRChannel))) {return false;}
+					if(!environment->GetBRIR()->AddBRIR(VirtualSpeakerPosition::NADIR, Common::T_ear::RIGHT, std::move(rightBRIRChannel))){return false;}
+				}
-				// TO DO: consider elevations. Read zenith and nadir speakers
 			}
 			return true;
 		}
diff --git a/3dti_Toolkit/BinauralSpatializer/BRIR.cpp b/3dti_Toolkit/BinauralSpatializer/BRIR.cpp
index ecd8873c..9ff8a681 100644
--- a/3dti_Toolkit/BinauralSpatializer/BRIR.cpp
+++ b/3dti_Toolkit/BinauralSpatializer/BRIR.cpp
@@ -64,21 +64,26 @@ namespace Binaural {
 		return t_BRIR_DataBase;
 	}
 
-	void CBRIR::AddBRIR(VirtualSpeakerPosition vsPosition, Common::T_ear vsChannel, TImpulseResponse && newBRIR)
+	bool CBRIR::AddBRIR(VirtualSpeakerPosition vsPosition, Common::T_ear vsChannel, TImpulseResponse && newBRIR)
 	{
 		if (setupInProgress) {
 			auto returnValue = t_BRIR_DataBase.emplace(TVirtualSpeaker(vsPosition, vsChannel), std::forward<TImpulseResponse>(newBRIR));
 			//Error handler
-			if (returnValue.second) { /*SET_RESULT(RESULT_OK, "BRIR emplaced into t_BRIR_DataBase succesfully"); */ }
-			else { SET_RESULT(RESULT_WARNING, "Error emplacing BRIR in t_BRIR_DataBase map"); }
+			if (returnValue.second) { /*SET_RESULT(RESULT_OK, "BRIR emplaced into t_BRIR_DataBase successfully"); */
+				return true;
+			}
+			else { SET_RESULT(RESULT_WARNING, "Error emplacing BRIR in t_BRIR_DataBase map");
+				return false;
+			}
 		}
 		else {
 			SET_RESULT(RESULT_ERROR_NOTSET, "AddBRIR: It is not possible to add a BRIR. Try to call BeginSetup before adding a BRIR");
+			return false;
 		}
 	}
 
-	void CBRIR::EndSetup()
+	bool CBRIR::EndSetup()
 	{
 		if (!t_BRIR_DataBase.empty())
 		{
@@ -93,17 +98,21 @@ namespace Binaural {
 #endif
 
 			//Calculate ARIR table and set the convolution buffers
-			ownerEnvironment->SetABIR();
+			if (!ownerEnvironment->SetABIR()) {
+				return false;
+			}
 
 			//Free up memory
 			//t_BRIR_DataBase.clear();
 
 			SET_RESULT(RESULT_OK, "BRIR Matrix completed successfully");
+			return true;
 		}
 		else
 		{
 			// TO DO: Should be ASSERT?
 			SET_RESULT(RESULT_ERROR_NOTSET, "The t_BRIR_DataBase map has not been set");
+			return false;
 		}
 	}
 
@@ -141,6 +150,10 @@ namespace Binaural {
 		BRIRLength_frequency = 0;
 		bufferSize = 0;
 	}
+	bool CBRIR::IsIREmpty(const TImpulseResponse_Partitioned& in) {
+		return in == emptyBRIR_partitioned;
+	}
 
 	const TImpulseResponse_Partitioned & CBRIR::GetBRIR_Partitioned(VirtualSpeakerPosition vsPos, Common::T_ear vsChannel) const
 	{
diff --git a/3dti_Toolkit/BinauralSpatializer/BRIR.h b/3dti_Toolkit/BinauralSpatializer/BRIR.h
index 9e1798c6..75fa69d3 100644
--- a/3dti_Toolkit/BinauralSpatializer/BRIR.h
+++ b/3dti_Toolkit/BinauralSpatializer/BRIR.h
@@ -115,10 +115,11 @@ namespace Binaural
 	*	\param [in] vsPosition Virtual Speaker position (N,S,E,W)
 	*	\param [in] vsChannel Virtual Speaker Channel (left, right)
 	*	\param [in] newBRIR BRIR vector value to add to the BRIR matrix
+	*	\retval true if the BRIR was added successfully, false otherwise
 	*	\eh On error, an error code is reported to the error handler. Warnings may be reported to the error handler
 	*/
-	void AddBRIR(VirtualSpeakerPosition vsPosition, Common::T_ear vsChannel, TImpulseResponse && newBRIR);
+	bool AddBRIR(VirtualSpeakerPosition vsPosition, Common::T_ear vsChannel, TImpulseResponse && newBRIR);
 
 	/** \brief Set the full BRIR matrix.
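+	*	\details newTable is taken by rvalue reference and moved in, so the caller's table is left in a moved-from state.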
* \param [in] newTable full table with all BRIR data @@ -127,10 +128,11 @@ namespace Binaural void AddBRIRTable(TBRIRTable && newTable); /** \brief Stop the BRIR configuration and set the ABIR configuration + * \retval boolean to indicate if setup was successful * \eh On success, RESULT_OK is reported to the error handler. * On error, an error code is reported to the error handler. */ - void EndSetup(); + bool EndSetup(); /** \brief Get BRIR filter length in time domain * \retval length int BRIR filter length in time domain @@ -192,6 +194,8 @@ namespace Binaural * \retval table raw BRIR table partitioned */ //const TBRIRTablePartitioned & GetRawBRIRTablePartitioned() const; + + bool IsIREmpty(const TImpulseResponse_Partitioned& in); private: diff --git a/3dti_Toolkit/BinauralSpatializer/Environment.cpp b/3dti_Toolkit/BinauralSpatializer/Environment.cpp index 37fa5c2e..f5175dbf 100644 --- a/3dti_Toolkit/BinauralSpatializer/Environment.cpp +++ b/3dti_Toolkit/BinauralSpatializer/Environment.cpp @@ -83,20 +83,70 @@ namespace Binaural { //WARNING: This setup is valid because it is assumed that BRIRLength = AIRLength outputLeft.Setup(bufferLength, BRIRLength); outputRight.Setup(bufferLength, BRIRLength); -#else - //Prepare output buffers to perform UP convolutions in ProcessVirtualAmbisonicReverb - wLeft_UPConvolution.Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); - wRight_UPConvolution.Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); - xLeft_UPConvolution.Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); - xRight_UPConvolution.Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); - yLeft_UPConvolution.Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); - yRight_UPConvolution.Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); +#else //Prepare output buffers to perform UP convolutions in ProcessVirtualAmbisonicReverb + switch(reverberationOrder){ + case TReverberationOrder::BIDIMENSIONAL: + wLeft_UPConvolution. Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); + wRight_UPConvolution.Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); + xLeft_UPConvolution. Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); + xRight_UPConvolution.Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); + yLeft_UPConvolution. Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); + yRight_UPConvolution.Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); + break; + case TReverberationOrder::THREEDIMENSIONAL: + wLeft_UPConvolution. Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); + wRight_UPConvolution.Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); + xLeft_UPConvolution. Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); + xRight_UPConvolution.Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); + yLeft_UPConvolution. 
Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); + yRight_UPConvolution.Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); + zLeft_UPConvolution. Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); + zRight_UPConvolution.Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); + break; + case TReverberationOrder::ADIMENSIONAL: + wLeft_UPConvolution. Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); + wRight_UPConvolution.Setup(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); + break; + } #endif } } } - void CEnvironment::SetABIR() + TReverberationOrder CEnvironment::GetReverberationOrder() + { + return reverberationOrder; + } + + void CEnvironment::SetABIRAdimensional(int bufferLength, int blockLengthFreq, int numberOfBlocks) + { + wLeft_UPConvolution.Setup(bufferLength, blockLengthFreq, numberOfBlocks, false); + wRight_UPConvolution.Setup(bufferLength, blockLengthFreq, numberOfBlocks, false); + } + + void CEnvironment::SetABIRBidimensional(int bufferLength, int blockLengthFreq, int numberOfBlocks) + { + wLeft_UPConvolution.Setup(bufferLength, blockLengthFreq, numberOfBlocks, false); + wRight_UPConvolution.Setup(bufferLength,blockLengthFreq, numberOfBlocks, false); + xLeft_UPConvolution.Setup(bufferLength, blockLengthFreq, numberOfBlocks, false); + xRight_UPConvolution.Setup(bufferLength,blockLengthFreq, numberOfBlocks, false); + yLeft_UPConvolution.Setup(bufferLength, blockLengthFreq, numberOfBlocks, false); + yRight_UPConvolution.Setup(bufferLength,blockLengthFreq, numberOfBlocks, false); + } + + void CEnvironment::SetABIRThreedimensional(int bufferLength, int blockLengthFreq, int numberOfBlocks) + { + wLeft_UPConvolution.Setup(bufferLength, blockLengthFreq, numberOfBlocks, false); + wRight_UPConvolution.Setup(bufferLength, blockLengthFreq, numberOfBlocks, false); + xLeft_UPConvolution.Setup(bufferLength, blockLengthFreq, numberOfBlocks, false); + xRight_UPConvolution.Setup(bufferLength, blockLengthFreq, numberOfBlocks, false); + yLeft_UPConvolution.Setup(bufferLength, blockLengthFreq, numberOfBlocks, false); + yRight_UPConvolution.Setup(bufferLength, blockLengthFreq, numberOfBlocks, false); + zLeft_UPConvolution.Setup(bufferLength, blockLengthFreq, numberOfBlocks, false); + zRight_UPConvolution.Setup(bufferLength, blockLengthFreq, numberOfBlocks, false); + } + + bool CEnvironment::SetABIR() { if (ownerCore != nullptr) { @@ -114,61 +164,192 @@ namespace Binaural { outputRight.Setup(bufferLength, BRIRLength); #else //Configure AIR values (partitions and FFTs) - CalculateABIRPartitioned(); + bool result = CalculateABIRPartitioned(); //Prepare output buffers to perform UP convolutions in ProcessVirtualAmbisonicReverb - wLeft_UPConvolution.Setup (bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); - wRight_UPConvolution.Setup (bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); - xLeft_UPConvolution.Setup (bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); - xRight_UPConvolution.Setup (bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); - yLeft_UPConvolution.Setup (bufferLength, GetABIR().GetDataBlockLength_freq(), 
GetABIR().GetDataNumberOfBlocks(), false); - yRight_UPConvolution.Setup (bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks(), false); + switch (reverberationOrder) { + case TReverberationOrder::BIDIMENSIONAL: + SetABIRBidimensional(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks()); + break; + case TReverberationOrder::THREEDIMENSIONAL: + SetABIRThreedimensional(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks()); + break; + case TReverberationOrder::ADIMENSIONAL: + SetABIRAdimensional(bufferLength, GetABIR().GetDataBlockLength_freq(), GetABIR().GetDataNumberOfBlocks()); + break; + } + + return result; #endif } + else + { + return false; + } // TODO if (GET_LAST_RESULT() != OK) { RAISE_NOT_INITIALISED_ERROR(...); } } + else { + return false; + } } - void CEnvironment::CalculateABIRPartitioned() + bool CEnvironment::CalculateABIRPartitionedAdimensional() { - environmentABIR.Setup(ownerCore->GetAudioState().bufferSize, environmentBRIR->GetBRIRLength()); - //1. Get BRIR values for each channel - TImpulseResponse_Partitioned northLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::NORTH, Common::T_ear::LEFT); - TImpulseResponse_Partitioned southLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::SOUTH, Common::T_ear::LEFT); - TImpulseResponse_Partitioned eastLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::EAST, Common::T_ear::LEFT); - TImpulseResponse_Partitioned westLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::WEST, Common::T_ear::LEFT); - TImpulseResponse_Partitioned northRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::NORTH, Common::T_ear::RIGHT); - TImpulseResponse_Partitioned southRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::SOUTH, Common::T_ear::RIGHT); - TImpulseResponse_Partitioned eastRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::EAST, Common::T_ear::RIGHT); - TImpulseResponse_Partitioned westRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::WEST, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned northLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::NORTH, Common::T_ear::LEFT); + TImpulseResponse_Partitioned southLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::SOUTH, Common::T_ear::LEFT); + TImpulseResponse_Partitioned eastLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::EAST, Common::T_ear::LEFT); + TImpulseResponse_Partitioned westLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::WEST, Common::T_ear::LEFT); + TImpulseResponse_Partitioned zenitLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::ZENIT, Common::T_ear::LEFT); + TImpulseResponse_Partitioned nadirLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::NADIR, Common::T_ear::LEFT); + + TImpulseResponse_Partitioned northRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::NORTH, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned southRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::SOUTH, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned eastRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::EAST, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned westRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::WEST, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned zenitRight = 
environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::ZENIT, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned nadirRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::NADIR, Common::T_ear::RIGHT); long s = northLeft.size(); - if ( s == 0 || - northLeft .size() != s || - southLeft .size() != s || - eastLeft .size() != s || - westLeft .size() != s || - northRight.size() != s || - southRight.size() != s || - eastRight .size() != s || - westRight .size() != s ) + TImpulseResponse_Partitioned newAIR_W_left, newAIR_X_left, newAIR_Y_left, newAIR_Z_left; + TImpulseResponse_Partitioned newAIR_W_right, newAIR_X_right, newAIR_Y_right, newAIR_Z_right; + + if (s == 0 || + northLeft.size() != s || environmentBRIR->IsIREmpty(northLeft) || + southLeft.size() != s || environmentBRIR->IsIREmpty(southLeft) || + eastLeft.size() != s || environmentBRIR->IsIREmpty(eastLeft) || + westLeft.size() != s || environmentBRIR->IsIREmpty(westLeft) || + northRight.size() != s || environmentBRIR->IsIREmpty(northRight) || + southRight.size() != s || environmentBRIR->IsIREmpty(southRight) || + eastRight.size() != s || environmentBRIR->IsIREmpty(eastRight) || + westRight.size() != s || environmentBRIR->IsIREmpty(westRight)) { - SET_RESULT( RESULT_ERROR_BADSIZE, "Buffers should be the same and not zero" ); - return; + SET_RESULT(RESULT_ERROR_BADSIZE, "Buffers should be the same and not zero"); + return false; } - + bool useZAxis = true; + if (zenitLeft.size() != s || environmentBRIR->IsIREmpty(zenitLeft) || + zenitRight.size() != s || environmentBRIR->IsIREmpty(zenitRight) || + nadirLeft.size() != s || environmentBRIR->IsIREmpty(nadirLeft) || + nadirRight.size() != s || environmentBRIR->IsIREmpty(nadirRight)) { + useZAxis = false; + } + //2. Init AIR buffers + newAIR_W_left.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); + newAIR_X_left.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); + newAIR_Y_left.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); + + newAIR_W_right.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); + newAIR_X_right.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); + newAIR_Y_right.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); + + if (useZAxis) { + newAIR_Z_right.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); + newAIR_Z_left.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); + } + + for (int i = 0; i < environmentBRIR->GetBRIRNumberOfSubfilters(); i++) + { + newAIR_W_left[i].resize(environmentBRIR->GetBRIROneSubfilterLength(), 0.0f); + newAIR_X_left[i].resize(environmentBRIR->GetBRIROneSubfilterLength(), 0.0f); + newAIR_Y_left[i].resize(environmentBRIR->GetBRIROneSubfilterLength(), 0.0f); + newAIR_W_right[i].resize(environmentBRIR->GetBRIROneSubfilterLength(), 0.0f); + newAIR_X_right[i].resize(environmentBRIR->GetBRIROneSubfilterLength(), 0.0f); + newAIR_Y_right[i].resize(environmentBRIR->GetBRIROneSubfilterLength(), 0.0f); + + if (useZAxis) { + newAIR_Z_right[i].resize(environmentBRIR->GetBRIROneSubfilterLength(), 0.0f); + newAIR_Z_left[i].resize(environmentBRIR->GetBRIROneSubfilterLength(), 0.0f); + } + + } + + //3. 
AIR codification from BRIR
+		for (int i = 0; i < environmentBRIR->GetBRIRNumberOfSubfilters(); i++)
+		{
+			for (int j = 0; j < environmentBRIR->GetBRIROneSubfilterLength(); j++)
+			{
+				newAIR_W_left[i][j] = 0.707107f * (northLeft[i][j] + southLeft[i][j] + eastLeft[i][j] + westLeft[i][j]);
+				newAIR_X_left[i][j] = northLeft[i][j] - southLeft[i][j];
+				newAIR_Y_left[i][j] = westLeft[i][j] - eastLeft[i][j];
+
+				newAIR_W_right[i][j] = 0.707107f * (northRight[i][j] + southRight[i][j] + eastRight[i][j] + westRight[i][j]);
+				newAIR_X_right[i][j] = northRight[i][j] - southRight[i][j];
+				newAIR_Y_right[i][j] = westRight[i][j] - eastRight[i][j];
+
+				if (useZAxis) {
+					newAIR_W_left[i][j] += 0.707107f * (zenitLeft[i][j] + nadirLeft[i][j]);
+					newAIR_W_right[i][j] += 0.707107f * (zenitRight[i][j] + nadirRight[i][j]);
+					newAIR_Z_right[i][j] = zenitRight[i][j] - nadirRight[i][j];
+					newAIR_Z_left[i][j] = zenitLeft[i][j] - nadirLeft[i][j];
+				}
+				else
+				{
+					newAIR_W_left[i][j] *= 1.57814f;	//These statements equalize the power of the W channel when only two axes are used (0D and 2D modes),
+					newAIR_W_right[i][j] *= 1.57814f;	//because 2D mode compensates for the missing Z channel by adding power to the X channel
+				}
+			}
+		}
+
+		//Setup AIR class
+		environmentABIR.AddImpulseResponse(TBFormatChannel::W, Common::T_ear::LEFT, std::move(newAIR_W_left));
+		environmentABIR.AddImpulseResponse(TBFormatChannel::W, Common::T_ear::RIGHT, std::move(newAIR_W_right));
+		environmentABIR.AddImpulseResponse(TBFormatChannel::X, Common::T_ear::LEFT, std::move(newAIR_X_left));
+		environmentABIR.AddImpulseResponse(TBFormatChannel::X, Common::T_ear::RIGHT, std::move(newAIR_X_right));
+		environmentABIR.AddImpulseResponse(TBFormatChannel::Y, Common::T_ear::LEFT, std::move(newAIR_Y_left));
+		environmentABIR.AddImpulseResponse(TBFormatChannel::Y, Common::T_ear::RIGHT, std::move(newAIR_Y_right));
+
+		if (useZAxis) {
+			environmentABIR.AddImpulseResponse(TBFormatChannel::Z, Common::T_ear::LEFT, std::move(newAIR_Z_left));
+			environmentABIR.AddImpulseResponse(TBFormatChannel::Z, Common::T_ear::RIGHT, std::move(newAIR_Z_right));
+		}
+
+		return true;
+	}
+
+	bool CEnvironment::CalculateABIRPartitionedBidimensional()
+	{
+		//1.
Get BRIR values for each channel + TImpulseResponse_Partitioned northLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::NORTH, Common::T_ear::LEFT); + TImpulseResponse_Partitioned southLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::SOUTH, Common::T_ear::LEFT); + TImpulseResponse_Partitioned eastLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::EAST, Common::T_ear::LEFT); + TImpulseResponse_Partitioned westLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::WEST, Common::T_ear::LEFT); + TImpulseResponse_Partitioned northRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::NORTH, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned southRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::SOUTH, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned eastRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::EAST, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned westRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::WEST, Common::T_ear::RIGHT); + + long s = northLeft.size(); + TImpulseResponse_Partitioned newAIR_W_left, newAIR_X_left, newAIR_Y_left; TImpulseResponse_Partitioned newAIR_W_right, newAIR_X_right, newAIR_Y_right; + + if (s == 0 || + northLeft.size() != s || environmentBRIR->IsIREmpty(northLeft) || + southLeft.size() != s || environmentBRIR->IsIREmpty(southLeft) || + eastLeft.size() != s || environmentBRIR->IsIREmpty(eastLeft) || + westLeft.size() != s || environmentBRIR->IsIREmpty(westLeft) || + northRight.size() != s || environmentBRIR->IsIREmpty(northRight) || + southRight.size() != s || environmentBRIR->IsIREmpty(southRight) || + eastRight.size() != s || environmentBRIR->IsIREmpty(eastRight) || + westRight.size() != s || environmentBRIR->IsIREmpty(westRight)) + { + + SET_RESULT(RESULT_ERROR_BADSIZE, "Buffers should be the same and not zero"); + return false; + } + + //2. Init AIR buffers newAIR_W_left.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); newAIR_X_left.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); newAIR_Y_left.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); newAIR_W_right.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); newAIR_X_right.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); newAIR_Y_right.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); - - for (int i=0; i < environmentBRIR->GetBRIRNumberOfSubfilters(); i++) + + + for (int i = 0; i < environmentBRIR->GetBRIRNumberOfSubfilters(); i++) { newAIR_W_left[i].resize(environmentBRIR->GetBRIROneSubfilterLength(), 0.0f); newAIR_X_left[i].resize(environmentBRIR->GetBRIROneSubfilterLength(), 0.0f); @@ -179,9 +360,9 @@ namespace Binaural { } //3. 
AIR codification from BRIR - for (int i = 0; i < environmentBRIR->GetBRIRNumberOfSubfilters(); i++) + for (int i = 0; i < environmentBRIR->GetBRIRNumberOfSubfilters(); i++) { - for (int j = 0; j < environmentBRIR->GetBRIROneSubfilterLength(); j++) + for (int j = 0; j < environmentBRIR->GetBRIROneSubfilterLength(); j++) { newAIR_W_left[i][j] = 0.707107f * (northLeft[i][j] + southLeft[i][j] + eastLeft[i][j] + westLeft[i][j]); newAIR_X_left[i][j] = northLeft[i][j] - southLeft[i][j]; @@ -194,39 +375,173 @@ namespace Binaural { } //Setup AIR class - environmentABIR.AddImpulseResponse(TBFormatChannel::W, Common::T_ear::LEFT, std::move(newAIR_W_left)); - environmentABIR.AddImpulseResponse(TBFormatChannel::W, Common::T_ear::RIGHT, std::move(newAIR_W_right)); - environmentABIR.AddImpulseResponse(TBFormatChannel::X, Common::T_ear::LEFT, std::move(newAIR_X_left)); - environmentABIR.AddImpulseResponse(TBFormatChannel::X, Common::T_ear::RIGHT, std::move(newAIR_X_right)); - environmentABIR.AddImpulseResponse(TBFormatChannel::Y, Common::T_ear::LEFT, std::move(newAIR_Y_left)); - environmentABIR.AddImpulseResponse(TBFormatChannel::Y, Common::T_ear::RIGHT, std::move(newAIR_Y_right)); - } + environmentABIR.AddImpulseResponse(TBFormatChannel::W, Common::T_ear::LEFT, std::move(newAIR_W_left)); + environmentABIR.AddImpulseResponse(TBFormatChannel::W, Common::T_ear::RIGHT, std::move(newAIR_W_right)); + environmentABIR.AddImpulseResponse(TBFormatChannel::X, Common::T_ear::LEFT, std::move(newAIR_X_left)); + environmentABIR.AddImpulseResponse(TBFormatChannel::X, Common::T_ear::RIGHT, std::move(newAIR_X_right)); + environmentABIR.AddImpulseResponse(TBFormatChannel::Y, Common::T_ear::LEFT, std::move(newAIR_Y_left)); + environmentABIR.AddImpulseResponse(TBFormatChannel::Y, Common::T_ear::RIGHT, std::move(newAIR_Y_right)); + return true; + } - void CEnvironment::CalculateABIRwithoutPartitions() + bool CEnvironment::CalculateABIRPartitionedThreedimensional() { //1. 
Get BRIR values for each channel - TImpulseResponse northLeft = environmentBRIR-> GetBRIR(VirtualSpeakerPosition::NORTH, Common::T_ear::LEFT); - TImpulseResponse southLeft = environmentBRIR-> GetBRIR(VirtualSpeakerPosition::SOUTH, Common::T_ear::LEFT); - TImpulseResponse eastLeft = environmentBRIR-> GetBRIR(VirtualSpeakerPosition::EAST, Common::T_ear::LEFT); - TImpulseResponse westLeft = environmentBRIR-> GetBRIR(VirtualSpeakerPosition::WEST, Common::T_ear::LEFT); - TImpulseResponse northRight = environmentBRIR-> GetBRIR(VirtualSpeakerPosition::NORTH, Common::T_ear::RIGHT); - TImpulseResponse southRight = environmentBRIR-> GetBRIR(VirtualSpeakerPosition::SOUTH, Common::T_ear::RIGHT); - TImpulseResponse eastRight = environmentBRIR-> GetBRIR(VirtualSpeakerPosition::EAST, Common::T_ear::RIGHT); - TImpulseResponse westRight = environmentBRIR-> GetBRIR(VirtualSpeakerPosition::WEST, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned northLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::NORTH, Common::T_ear::LEFT); + TImpulseResponse_Partitioned southLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::SOUTH, Common::T_ear::LEFT); + TImpulseResponse_Partitioned eastLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::EAST, Common::T_ear::LEFT); + TImpulseResponse_Partitioned westLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::WEST, Common::T_ear::LEFT); + TImpulseResponse_Partitioned zenitLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::ZENIT, Common::T_ear::LEFT); + TImpulseResponse_Partitioned nadirLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::NADIR, Common::T_ear::LEFT); + + TImpulseResponse_Partitioned northRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::NORTH, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned southRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::SOUTH, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned eastRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::EAST, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned westRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::WEST, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned zenitRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::ZENIT, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned nadirRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::NADIR, Common::T_ear::RIGHT); + + long s = northLeft.size(); + + TImpulseResponse_Partitioned newAIR_W_left, newAIR_X_left, newAIR_Y_left, newAIR_Z_left; + TImpulseResponse_Partitioned newAIR_W_right, newAIR_X_right, newAIR_Y_right, newAIR_Z_right; + + if (s == 0 || + northLeft.size() != s || environmentBRIR->IsIREmpty(northLeft) || + southLeft.size() != s || environmentBRIR->IsIREmpty(southLeft) || + eastLeft.size() != s || environmentBRIR->IsIREmpty(eastLeft) || + westLeft.size() != s || environmentBRIR->IsIREmpty(westLeft) || + northRight.size() != s || environmentBRIR->IsIREmpty(northRight) || + southRight.size() != s || environmentBRIR->IsIREmpty(southRight) || + eastRight.size() != s || environmentBRIR->IsIREmpty(eastRight) || + westRight.size() != s || environmentBRIR->IsIREmpty(westRight) || + zenitLeft.size() != s || environmentBRIR->IsIREmpty(zenitLeft) || + zenitRight.size() != s || environmentBRIR->IsIREmpty(zenitRight) || + nadirLeft.size() != s || environmentBRIR->IsIREmpty(nadirLeft) || + nadirRight.size() != s || 
environmentBRIR->IsIREmpty(nadirRight)) + { + SET_RESULT(RESULT_ERROR_BADSIZE, "Buffers should be the same and not zero"); + return false; + } //2. Init AIR buffers - TImpulseResponse newAIR_W_left, newAIR_X_left, newAIR_Y_left; - TImpulseResponse newAIR_W_right, newAIR_X_right, newAIR_Y_right; - newAIR_W_left. resize(environmentBRIR->GetBRIRLength_frequency(), 0.0f); - newAIR_X_left. resize(environmentBRIR->GetBRIRLength_frequency(), 0.0f); - newAIR_Y_left. resize(environmentBRIR->GetBRIRLength_frequency(), 0.0f); - newAIR_W_right. resize(environmentBRIR->GetBRIRLength_frequency(), 0.0f); - newAIR_X_right. resize(environmentBRIR->GetBRIRLength_frequency(), 0.0f); - newAIR_Y_right. resize(environmentBRIR->GetBRIRLength_frequency(), 0.0f); + newAIR_W_left.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); + newAIR_X_left.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); + newAIR_Y_left.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); + newAIR_Z_left.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); + newAIR_W_right.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); + newAIR_X_right.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); + newAIR_Y_right.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); + newAIR_Z_right.resize(environmentBRIR->GetBRIRNumberOfSubfilters()); + + for (int i = 0; i < environmentBRIR->GetBRIRNumberOfSubfilters(); i++) + { + newAIR_W_left[i].resize(environmentBRIR->GetBRIROneSubfilterLength(), 0.0f); + newAIR_X_left[i].resize(environmentBRIR->GetBRIROneSubfilterLength(), 0.0f); + newAIR_Y_left[i].resize(environmentBRIR->GetBRIROneSubfilterLength(), 0.0f); + newAIR_Z_left[i].resize(environmentBRIR->GetBRIROneSubfilterLength(), 0.0f); + newAIR_W_right[i].resize(environmentBRIR->GetBRIROneSubfilterLength(), 0.0f); + newAIR_X_right[i].resize(environmentBRIR->GetBRIROneSubfilterLength(), 0.0f); + newAIR_Y_right[i].resize(environmentBRIR->GetBRIROneSubfilterLength(), 0.0f); + newAIR_Z_right[i].resize(environmentBRIR->GetBRIROneSubfilterLength(), 0.0f); + } - //3. 
AIR codification from BRIR + for (int i = 0; i < environmentBRIR->GetBRIRNumberOfSubfilters(); i++) + { + for (int j = 0; j < environmentBRIR->GetBRIROneSubfilterLength(); j++) + { + newAIR_W_left[i][j] = 0.707107f * (northLeft[i][j] + southLeft[i][j] + eastLeft[i][j] + westLeft[i][j] + zenitLeft[i][j] + nadirLeft[i][j]); + newAIR_X_left[i][j] = northLeft[i][j] - southLeft[i][j]; + newAIR_Y_left[i][j] = westLeft[i][j] - eastLeft[i][j]; + newAIR_Z_left[i][j] = zenitLeft[i][j] - nadirLeft[i][j]; + + newAIR_W_right[i][j] = 0.707107f * (northRight[i][j] + southRight[i][j] + eastRight[i][j] + westRight[i][j] + zenitRight[i][j] + nadirRight[i][j]); + newAIR_X_right[i][j] = northRight[i][j] - southRight[i][j]; + newAIR_Y_right[i][j] = westRight[i][j] - eastRight[i][j]; + newAIR_Z_right[i][j] = zenitRight[i][j] - nadirRight[i][j]; + + } + } + + //Setup AIR class + environmentABIR.AddImpulseResponse(TBFormatChannel::W, Common::T_ear::LEFT, std::move(newAIR_W_left)); + environmentABIR.AddImpulseResponse(TBFormatChannel::W, Common::T_ear::RIGHT, std::move(newAIR_W_right)); + environmentABIR.AddImpulseResponse(TBFormatChannel::X, Common::T_ear::LEFT, std::move(newAIR_X_left)); + environmentABIR.AddImpulseResponse(TBFormatChannel::X, Common::T_ear::RIGHT, std::move(newAIR_X_right)); + environmentABIR.AddImpulseResponse(TBFormatChannel::Y, Common::T_ear::LEFT, std::move(newAIR_Y_left)); + environmentABIR.AddImpulseResponse(TBFormatChannel::Y, Common::T_ear::RIGHT, std::move(newAIR_Y_right)); + environmentABIR.AddImpulseResponse(TBFormatChannel::Z, Common::T_ear::LEFT, std::move(newAIR_Z_left)); + environmentABIR.AddImpulseResponse(TBFormatChannel::Z, Common::T_ear::RIGHT, std::move(newAIR_Z_right)); + + return true; + } + + + bool CEnvironment::CalculateABIRPartitioned() + { + environmentABIR.Setup(ownerCore->GetAudioState().bufferSize, environmentBRIR->GetBRIRLength()); + + + //1. 
Get BRIR values for each channel + TImpulseResponse_Partitioned northLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::NORTH, Common::T_ear::LEFT); + TImpulseResponse_Partitioned southLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::SOUTH, Common::T_ear::LEFT); + TImpulseResponse_Partitioned eastLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::EAST, Common::T_ear::LEFT); + TImpulseResponse_Partitioned westLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::WEST, Common::T_ear::LEFT); + TImpulseResponse_Partitioned zenitLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::ZENIT, Common::T_ear::LEFT); + TImpulseResponse_Partitioned nadirLeft = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::NADIR, Common::T_ear::LEFT); + + TImpulseResponse_Partitioned northRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::NORTH, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned southRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::SOUTH, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned eastRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::EAST, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned westRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::WEST, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned zenitRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::ZENIT, Common::T_ear::RIGHT); + TImpulseResponse_Partitioned nadirRight = environmentBRIR->GetBRIR_Partitioned(VirtualSpeakerPosition::NADIR, Common::T_ear::RIGHT); + + long s = northLeft.size(); + + TImpulseResponse_Partitioned newAIR_W_left, newAIR_X_left, newAIR_Y_left, newAIR_Z_left; + TImpulseResponse_Partitioned newAIR_W_right, newAIR_X_right, newAIR_Y_right, newAIR_Z_right; + + switch (reverberationOrder) { + case TReverberationOrder::BIDIMENSIONAL: + return CalculateABIRPartitionedBidimensional(); + case TReverberationOrder::THREEDIMENSIONAL: + return CalculateABIRPartitionedThreedimensional(); + case TReverberationOrder::ADIMENSIONAL: + return CalculateABIRPartitionedAdimensional(); + default: return false; + } + } + + + void CEnvironment::CalculateABIRwithoutPartitions() + { + if (reverberationOrder == TReverberationOrder::BIDIMENSIONAL) { + + //1. Get BRIR values for each channel + TImpulseResponse northLeft = environmentBRIR->GetBRIR(VirtualSpeakerPosition::NORTH, Common::T_ear::LEFT); + TImpulseResponse southLeft = environmentBRIR->GetBRIR(VirtualSpeakerPosition::SOUTH, Common::T_ear::LEFT); + TImpulseResponse eastLeft = environmentBRIR->GetBRIR(VirtualSpeakerPosition::EAST, Common::T_ear::LEFT); + TImpulseResponse westLeft = environmentBRIR->GetBRIR(VirtualSpeakerPosition::WEST, Common::T_ear::LEFT); + TImpulseResponse northRight = environmentBRIR->GetBRIR(VirtualSpeakerPosition::NORTH, Common::T_ear::RIGHT); + TImpulseResponse southRight = environmentBRIR->GetBRIR(VirtualSpeakerPosition::SOUTH, Common::T_ear::RIGHT); + TImpulseResponse eastRight = environmentBRIR->GetBRIR(VirtualSpeakerPosition::EAST, Common::T_ear::RIGHT); + TImpulseResponse westRight = environmentBRIR->GetBRIR(VirtualSpeakerPosition::WEST, Common::T_ear::RIGHT); + + //2. 
Init AIR buffers
+			TImpulseResponse newAIR_W_left, newAIR_X_left, newAIR_Y_left;
+			TImpulseResponse newAIR_W_right, newAIR_X_right, newAIR_Y_right;
+			newAIR_W_left.resize(environmentBRIR->GetBRIRLength_frequency(), 0.0f);
+			newAIR_X_left.resize(environmentBRIR->GetBRIRLength_frequency(), 0.0f);
+			newAIR_Y_left.resize(environmentBRIR->GetBRIRLength_frequency(), 0.0f);
+			newAIR_W_right.resize(environmentBRIR->GetBRIRLength_frequency(), 0.0f);
+			newAIR_X_right.resize(environmentBRIR->GetBRIRLength_frequency(), 0.0f);
+			newAIR_Y_right.resize(environmentBRIR->GetBRIRLength_frequency(), 0.0f);
+
+			//3. AIR codification from BRIR
 		for (int j = 0; j < environmentBRIR->GetBRIRLength_frequency(); j++)
 		{
@@ -238,15 +553,25 @@ namespace Binaural {
 			newAIR_X_right[j] = northRight[j] - southRight[j];
 			newAIR_Y_right[j] = westRight[j] - eastRight[j];
 		}
-
-		//Setup AIR class
-		environmentABIR.Setup(ownerCore->GetAudioState().bufferSize, environmentBRIR->GetBRIRLength());
-		environmentABIR.AddImpulseResponse(TBFormatChannel::W, Common::T_ear::LEFT, std::move(newAIR_W_left));
-		environmentABIR.AddImpulseResponse(TBFormatChannel::W, Common::T_ear::RIGHT, std::move(newAIR_W_right));
-		environmentABIR.AddImpulseResponse(TBFormatChannel::X, Common::T_ear::LEFT, std::move(newAIR_X_left));
-		environmentABIR.AddImpulseResponse(TBFormatChannel::X, Common::T_ear::RIGHT, std::move(newAIR_X_right));
-		environmentABIR.AddImpulseResponse(TBFormatChannel::Y, Common::T_ear::LEFT, std::move(newAIR_Y_left));
-		environmentABIR.AddImpulseResponse(TBFormatChannel::Y, Common::T_ear::RIGHT, std::move(newAIR_Y_right));
+
+			//Setup AIR class
+			environmentABIR.Setup(ownerCore->GetAudioState().bufferSize, environmentBRIR->GetBRIRLength());
+			environmentABIR.AddImpulseResponse(TBFormatChannel::W, Common::T_ear::LEFT, std::move(newAIR_W_left));
+			environmentABIR.AddImpulseResponse(TBFormatChannel::W, Common::T_ear::RIGHT, std::move(newAIR_W_right));
+			environmentABIR.AddImpulseResponse(TBFormatChannel::X, Common::T_ear::LEFT, std::move(newAIR_X_left));
+			environmentABIR.AddImpulseResponse(TBFormatChannel::X, Common::T_ear::RIGHT, std::move(newAIR_X_right));
+			environmentABIR.AddImpulseResponse(TBFormatChannel::Y, Common::T_ear::LEFT, std::move(newAIR_Y_left));
+			environmentABIR.AddImpulseResponse(TBFormatChannel::Y, Common::T_ear::RIGHT, std::move(newAIR_Y_right));
+		}
+		else {
+			// Reverb without partitioned convolution is only implemented for the
+			// BIDIMENSIONAL order; THREEDIMENSIONAL and ADIMENSIONAL are not handled here.
+		}
 	}
 
//////////////////////////////////////////////
@@ -257,15 +582,26 @@ namespace Binaural {
 	}
//////////////////////////////////////////////
-
-	// Process virtual ambisonic reverb for specified buffers
-	void CEnvironment::ProcessVirtualAmbisonicReverb(CMonoBuffer<float> & outBufferLeft, CMonoBuffer<float> & outBufferRight )
+	void CEnvironment::ProcessVirtualAmbisonicReverbAdimensional(CMonoBuffer<float> & outBufferLeft, CMonoBuffer<float> & outBufferRight)
 	{
-		if (!environmentABIR.IsInitialized())
-		{
-			SET_RESULT(RESULT_ERROR_NOTINITIALIZED, "Data is not ready to be processed");
+		CMonoBuffer<float> w;	// B-Format data
+		CMonoBuffer<float> w_AbirW_left_FFT;
+		CMonoBuffer<float> w_AbirW_right_FFT;
+		CMonoBuffer<float> mixerOutput_left_FFT;
+		CMonoBuffer<float> mixerOutput_right_FFT;
+		CMonoBuffer<float> mixerOutput_left;
+		CMonoBuffer<float> mixerOutput_right;
+		CMonoBuffer<float> ouputBuffer_temp;
+
+		float WScale = 0.707107f;
+
+		// We assume all buffers have the same number of samples
+		size_t samplesInBuffer = ownerCore->GetAudioState().bufferSize;
+
+		// This would crash if there are no sources created. Rather than reporting error, do nothing
+		if (ownerCore->audioSources.size() == 0)
 			return;
-	}
 
 	/////////////////////////////////////////
 	// 1-st Order Virtual Ambisonics Encoder
 	/////////////////////////////////////////
 #ifdef USE_PROFILER_Environment
 	PROFILER3DTI.RelativeSampleStart(dsEnvEncoder);
 #endif
-		CMonoBuffer<float> w, x, y;	// B-Format data
-
-		// This would crash if there are no sources created. Rather than reporting error, do nothing
-		if (ownerCore->audioSources.size() == 0)
-			return;
-
-		// We assume all buffers have the same number of samples
-		size_t samplesInBuffer = ownerCore->GetAudioState().bufferSize;
 
 		// Init summation for B-Format channels
 		w.Fill(samplesInBuffer, 0.0f);
-		x.Fill(samplesInBuffer, 0.0f);
-		y.Fill(samplesInBuffer, 0.0f);
-		float WScale = 0.707107f;
 
 		// Go through each source
 		//for (int nSource = 0; nSource < ownerCore->audioSources.size(); nSource++)
@@ -304,23 +629,22 @@ namespace Binaural {
 			// Get azimuth, elevation and distance from listener to each source
 			// We precompute everything, to minimize per-sample computations.
 			Common::CTransform sourceTransform = eachSource->GetSourceTransform();
-			sourceTransform = eachSource->CalculateTransformPositionWithRestrictions(sourceTransform);
 			Common::CVector3 vectorToSource = ownerCore->GetListener()->GetListenerTransform().GetVectorTo(sourceTransform);
 			float sourceAzimuth = vectorToSource.GetAzimuthRadians();
 			float sourceElevation = vectorToSource.GetElevationRadians();
 			float sourceDistance = vectorToSource.GetDistance();
 			float cosAzimuth = std::cos(sourceAzimuth);
 			float sinAzimuth = std::sin(sourceAzimuth);
-			float sinElevationAbs = std::fabs(std::sin(sourceElevation));	// TEST: adding power to W channel to compensate for the lack of Z channel
+			float sinElevation = std::sin(sourceElevation);
 			float cosElevation = std::cos(sourceElevation);
 			float cosAcosE = cosAzimuth * cosElevation;
-			float sinAcosE = sinAzimuth * cosElevation;
+			float sinAcosE = sinAzimuth * cosElevation;
 			CMonoBuffer<float> sourceBuffer = eachSource->GetBuffer();
 			//ASSERT(sourceBuffer.size() > 0, RESULT_ERROR_NOTSET, "Attempt to process virtual ambisonics reverb without previously feeding audio source buffers", "");
 
 			//Apply Distance Attenuation
-			float distanceAttenuation_ReverbConstant = ownerCore->GetMagnitudes().GetReverbDistanceAttenuation();
-			if (eachSource->IsDistanceAttenuationEnabledReverb()) {
+			float distanceAttenuation_ReverbConstant = ownerCore->GetMagnitudes().GetReverbDistanceAttenuation();
+			if (eachSource->IsDistanceAttenuationEnabledReverb()) {
 				eachSource->distanceAttenuatorReverb.Process(sourceBuffer, sourceDistance, distanceAttenuation_ReverbConstant, ownerCore->GetAudioState().bufferSize, ownerCore->GetAudioState().sampleRate);
 			}
 			// Go through each sample
@@ -331,9 +655,6 @@ namespace Binaural {
 
 				// Add partial contribution of this source to each B-format channel
 				w[nSample] += newSample * WScale;
-				x[nSample] += newSample * cosAcosE;
-				x[nSample] += newSample * sinElevationAbs;	// Adding power to X channel to compensate for the lack of Z channel
-				y[nSample] += newSample * sinAcosE;
 			}
 
 			// Set flag for reverb process
@@ -351,14 +672,6 @@ namespace Binaural {
 #endif
 
 		//TODO All this could be parallelized
-		bool bUPConvolution = true;
-
-		CMonoBuffer<float> w_AbirW_left_FFT;
-		CMonoBuffer<float> w_AbirW_right_FFT;
-		CMonoBuffer<float> x_AbirX_left_FFT;
-		CMonoBuffer<float> x_AbirX_right_FFT;
-		CMonoBuffer<float> y_AbirY_left_FFT;
-		CMonoBuffer<float> y_AbirY_right_FFT;
 
 #ifdef USE_FREQUENCY_COVOLUTION_WITHOUT_PARTITIONS_REVERB
 		///////
@@ -395,10 +708,7 @@ namespace Binaural {
 
 		///Apply UPC algorithm
UPC algorithm
 		wLeft_UPConvolution.ProcessUPConvolution_withoutIFFT(w, GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::W, Common::T_ear::LEFT), w_AbirW_left_FFT);
 		wRight_UPConvolution.ProcessUPConvolution_withoutIFFT(w, GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::W, Common::T_ear::RIGHT), w_AbirW_right_FFT);
-		xLeft_UPConvolution.ProcessUPConvolution_withoutIFFT(x, GetABIR().GetImpulseResponse_Partitioned(X, Common::T_ear::LEFT), x_AbirX_left_FFT);
-		xRight_UPConvolution.ProcessUPConvolution_withoutIFFT(x, GetABIR().GetImpulseResponse_Partitioned(X, Common::T_ear::RIGHT), x_AbirX_right_FFT);
-		yLeft_UPConvolution.ProcessUPConvolution_withoutIFFT(y, GetABIR().GetImpulseResponse_Partitioned(Y, Common::T_ear::LEFT), y_AbirY_left_FFT);
-		yRight_UPConvolution.ProcessUPConvolution_withoutIFFT(y, GetABIR().GetImpulseResponse_Partitioned(Y, Common::T_ear::RIGHT), y_AbirY_right_FFT);
+
 #endif
@@ -414,28 +724,25 @@ namespace Binaural {
 	PROFILER3DTI.RelativeSampleStart(dsEnvInvFFT);
 #endif
 
-		CMonoBuffer<float> mixerOutput_left_FFT;
-		mixerOutput_left_FFT.SetFromMix({ w_AbirW_left_FFT, x_AbirX_left_FFT, y_AbirY_left_FFT });
-		CMonoBuffer<float> mixerOutput_right_FFT;
-		mixerOutput_right_FFT.SetFromMix({ w_AbirW_right_FFT, x_AbirX_right_FFT, y_AbirY_right_FFT });
+		mixerOutput_left_FFT.SetFromMix({ w_AbirW_left_FFT });
+		mixerOutput_right_FFT.SetFromMix({ w_AbirW_right_FFT });
 
 		////////////////////////////////////////
 		// FFT-1 Going back to the time domain
 		////////////////////////////////////////
 
 		//TODO All this could be parallelized
-		CMonoBuffer<float> mixerOutput_left;
-		CMonoBuffer<float> mixerOutput_right;
+
 #ifdef USE_FREQUENCY_COVOLUTION_WITHOUT_PARTITIONS_REVERB
 		outputLeft.CalculateIFFT_OLA(mixerOutput_left_FFT, mixerOutput_left);
 		outputRight.CalculateIFFT_OLA(mixerOutput_right_FFT, mixerOutput_right);
 #else
 		//Left channel
-		CMonoBuffer<float> ouputBuffer_temp;
 		Common::CFprocessor::CalculateIFFT(mixerOutput_left_FFT, ouputBuffer_temp);
 		//We are left only with the final half of the result
 		int halfsize = (int)(ouputBuffer_temp.size() * 0.5f);
+
 		CMonoBuffer<float> temp_OutputBlockLeft(ouputBuffer_temp.begin() + halfsize, ouputBuffer_temp.end());
 		mixerOutput_left = std::move(temp_OutputBlockLeft);			//To use in C++11
@@ -447,12 +754,12 @@ namespace Binaural {
 		CMonoBuffer<float> temp_OutputBlockRight(ouputBuffer_temp.begin() + halfsize, ouputBuffer_temp.end());
 		mixerOutput_right = std::move(temp_OutputBlockRight);			//To use in C++11
 #endif
-
-		//////////////////////////////////////////////
-		// Mix of chabbels decoded after convolution
-		//////////////////////////////////////////////
-
-		//Interlace	TODO Use the method in bufferClass??
+
+		//////////////////////////////////////////////
+		// Mix of channels decoded after convolution
+		//////////////////////////////////////////////
+
+		//Interlace	TODO Use the method in bufferClass??
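+		// push_back appends to the caller's buffers, so outBufferLeft and outBufferRight are expected to arrive empty.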
 		for (int i = 0; i < mixerOutput_left.size(); i++) {
 			outBufferLeft.push_back(mixerOutput_left[i]);
 			outBufferRight.push_back(mixerOutput_right[i]);
 		}
@@ -480,35 +787,546 @@ namespace Binaural {
 		WATCH(WV_ENVIRONMENT_OUTPUT_LEFT, outBufferLeft, CMonoBuffer<float>);
 		WATCH(WV_ENVIRONMENT_OUTPUT_RIGHT, outBufferRight, CMonoBuffer<float>);
 	}
-
-	void CEnvironment::ProcessDirectionality(CMonoBuffer<float> &buffer, float directionalityAttenutaion)
-	{
-		buffer.ApplyGain(directionalityAttenutaion);
-	}
-
-
-	// Process virtual ambisonic reverb for specified buffers
-	void CEnvironment::ProcessVirtualAmbisonicReverb(CStereoBuffer<float> & outBuffer)
+	void CEnvironment::ProcessVirtualAmbisonicReverbBidimensional(CMonoBuffer<float> & outBufferLeft, CMonoBuffer<float> & outBufferRight)
 	{
-		CMonoBuffer<float> outLeftBuffer;
-		CMonoBuffer<float> outRightBuffer;
-		ProcessVirtualAmbisonicReverb(outLeftBuffer, outRightBuffer);
-		outBuffer.Interlace(outLeftBuffer, outRightBuffer);
-	}
 //////////////////////////////////////////////
-
-	// Process reverb for one b-format channel encoded with 1st order ambisonics (useful for some wrappers)
-	void CEnvironment::ProcessEncodedChannelReverb(TBFormatChannel channel, CMonoBuffer<float> encoderIn, CMonoBuffer<float> & output)
+		CMonoBuffer<float> w, x, y;	// B-Format data
+		CMonoBuffer<float> w_AbirW_left_FFT;
+		CMonoBuffer<float> w_AbirW_right_FFT;
+		CMonoBuffer<float> x_AbirX_left_FFT;
+		CMonoBuffer<float> x_AbirX_right_FFT;
+		CMonoBuffer<float> y_AbirY_left_FFT;
+		CMonoBuffer<float> y_AbirY_right_FFT;
+		CMonoBuffer<float> mixerOutput_left_FFT;
+		CMonoBuffer<float> mixerOutput_right_FFT;
+		CMonoBuffer<float> mixerOutput_left;
+		CMonoBuffer<float> mixerOutput_right;
+		CMonoBuffer<float> ouputBuffer_temp;
+
+		float WScale = 0.707107f;
+
+		// We assume all buffers have the same number of samples
+		size_t samplesInBuffer = ownerCore->GetAudioState().bufferSize;
+
+		/////////////////////////////////////////
+		// 1-st Order Virtual Ambisonics Encoder
+		/////////////////////////////////////////
+#ifdef USE_PROFILER_Environment
+		PROFILER3DTI.RelativeSampleStart(dsEnvEncoder);
+#endif
+
+		// Init summation for B-Format channels
+		w.Fill(samplesInBuffer, 0.0f);
+		x.Fill(samplesInBuffer, 0.0f);
+		y.Fill(samplesInBuffer, 0.0f);
+
+		// Go through each source
+		//for (int nSource = 0; nSource < ownerCore->audioSources.size(); nSource++)
+		for (auto eachSource : ownerCore->audioSources)
+		{
+			// Check source flags for reverb process
+			if (!eachSource->IsReverbProcessEnabled())
+				continue;
+			if (!eachSource->IsReverbProcessReady())
+			{
+				SET_RESULT(RESULT_WARNING, "Attempt to do reverb process without updating source buffer; please call SetBuffer before ProcessVirtualAmbisonicReverb.");
+				continue;
+			}
+
+			//Check if the source is in the same position as the listener head. If yes, do not apply spatialization to this source
+			if (eachSource->distanceToListener < ownerCore->GetListener()->GetHeadRadius())
+			{
+				continue;
+			}
+
+			// Get azimuth, elevation and distance from listener to each source
+			// We precompute everything, to minimize per-sample computations.
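+			// First-order (FuMa-style) encoding gains computed below: W uses a constant 1/sqrt(2) (~0.707107),
+			// X ~ cos(azimuth)*cos(elevation) and Y ~ sin(azimuth)*cos(elevation); |sin(elevation)| is folded
+			// into X as a stand-in for the Z channel, which this bidimensional mode does not convolve.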
+			Common::CTransform sourceTransform = eachSource->GetSourceTransform();
+			Common::CVector3 vectorToSource = ownerCore->GetListener()->GetListenerTransform().GetVectorTo(sourceTransform);
+			float sourceAzimuth = vectorToSource.GetAzimuthRadians();
+			float sourceElevation = vectorToSource.GetElevationRadians();
+			float sourceDistance = vectorToSource.GetDistance();
+			float cosAzimuth = std::cos(sourceAzimuth);
+			float sinAzimuth = std::sin(sourceAzimuth);
+			float sinElevationAbs = std::fabs(std::sin(sourceElevation));	// TEST: adding power to W channel to compensate for the lack of Z channel
+			float cosElevation = std::cos(sourceElevation);
+			float cosAcosE = cosAzimuth * cosElevation;
+			float sinAcosE = sinAzimuth * cosElevation;
+			CMonoBuffer<float> sourceBuffer = eachSource->GetBuffer();
+			//ASSERT(sourceBuffer.size() > 0, RESULT_ERROR_NOTSET, "Attempt to process virtual ambisonics reverb without previously feeding audio source buffers", "");
+
+			//Apply Distance Attenuation
+			float distanceAttenuation_ReverbConstant = ownerCore->GetMagnitudes().GetReverbDistanceAttenuation();
+			if (eachSource->IsDistanceAttenuationEnabledReverb()) {
+				eachSource->distanceAttenuatorReverb.Process(sourceBuffer, sourceDistance, distanceAttenuation_ReverbConstant, ownerCore->GetAudioState().bufferSize, ownerCore->GetAudioState().sampleRate);
+			}
+			// Go through each sample
+			for (int nSample = 0; nSample < samplesInBuffer; nSample++)
+			{
+				// Value from the input buffer
+				float newSample = sourceBuffer[nSample];
+
+				// Add partial contribution of this source to each B-format channel
+				w[nSample] += newSample * WScale;
+				x[nSample] += newSample * cosAcosE;
+				x[nSample] += newSample * sinElevationAbs;	// Adding power to X channel to compensate for the lack of Z channel
+				y[nSample] += newSample * sinAcosE;
+			}
+
+			// Set flag for reverb process
+			eachSource->SetReverbProcessNotReady();
+		}
+
+#ifdef USE_PROFILER_Environment
+		PROFILER3DTI.RelativeSampleEnd(dsEnvEncoder);
+#endif
+		///////////////////////////////////////////
+		// Frequency-Domain Convolution with ABIR
+		///////////////////////////////////////////
+#ifdef USE_PROFILER_Environment
+		PROFILER3DTI.RelativeSampleStart(dsEnvConvolver);
+#endif
+		//TODO All this could be parallelized
+
+#ifdef USE_FREQUENCY_COVOLUTION_WITHOUT_PARTITIONS_REVERB
+		///////
+		// W //
+		///////
+		CMonoBuffer<float> w_FFT;
+		//Make FFT of W
+		Common::CFprocessor::GetFFT(w, w_FFT, environmentABIR.GetDataLength());
+		Common::CFprocessor::ComplexMultiplication(w_FFT, GetABIR().GetImpulseResponse(TBFormatChannel::W, T_ear::LEFT), w_AbirW_left_FFT);
+		Common::CFprocessor::ComplexMultiplication(w_FFT, GetABIR().GetImpulseResponse(TBFormatChannel::W, T_ear::RIGHT), w_AbirW_right_FFT);
+
+		///////
+		// X //
+		///////
+		CMonoBuffer<float> x_FFT;
+		//Make FFT of X
+		Common::CFprocessor::GetFFT(x, x_FFT, environmentABIR.GetDataLength());
+		//Complex Product
+		Common::CFprocessor::ComplexMultiplication(x_FFT, GetABIR().GetImpulseResponse(X, T_ear::LEFT), x_AbirX_left_FFT);
+		Common::CFprocessor::ComplexMultiplication(x_FFT, GetABIR().GetImpulseResponse(X, T_ear::RIGHT), x_AbirX_right_FFT);
+
+		///////
+		// Y //
+		///////
+		CMonoBuffer<float> y_FFT;
+		//TBFormatChannelData abirY = GetABIR().GetChannelData(Y);
+		//Make FFT of Y
+		Common::CFprocessor::GetFFT(y, y_FFT, environmentABIR.GetDataLength());
+		//Complex Product
+		Common::CFprocessor::ComplexMultiplication(y_FFT, GetABIR().GetImpulseResponse(Y, T_ear::LEFT), y_AbirY_left_FFT);
+		Common::CFprocessor::ComplexMultiplication(y_FFT, GetABIR().GetImpulseResponse(Y, T_ear::RIGHT), y_AbirY_right_FFT);
+#else
+
+		///Apply UPC algorithm
+		wLeft_UPConvolution.ProcessUPConvolution_withoutIFFT(w, GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::W, Common::T_ear::LEFT), w_AbirW_left_FFT);
+		wRight_UPConvolution.ProcessUPConvolution_withoutIFFT(w, GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::W, Common::T_ear::RIGHT), w_AbirW_right_FFT);
+		xLeft_UPConvolution.ProcessUPConvolution_withoutIFFT(x, GetABIR().GetImpulseResponse_Partitioned(X, Common::T_ear::LEFT), x_AbirX_left_FFT);
+		xRight_UPConvolution.ProcessUPConvolution_withoutIFFT(x, GetABIR().GetImpulseResponse_Partitioned(X, Common::T_ear::RIGHT), x_AbirX_right_FFT);
+		yLeft_UPConvolution.ProcessUPConvolution_withoutIFFT(y, GetABIR().GetImpulseResponse_Partitioned(Y, Common::T_ear::LEFT), y_AbirY_left_FFT);
+		yRight_UPConvolution.ProcessUPConvolution_withoutIFFT(y, GetABIR().GetImpulseResponse_Partitioned(Y, Common::T_ear::RIGHT), y_AbirY_right_FFT);
+#endif
+
+#ifdef USE_PROFILER_Environment
+		PROFILER3DTI.RelativeSampleEnd(dsEnvConvolver);
+#endif
+
+		///////////////////////////////////////
+		// Mix of channels in Frequency domain
+		///////////////////////////////////////
+
+#ifdef USE_PROFILER_Environment
+		PROFILER3DTI.RelativeSampleStart(dsEnvInvFFT);
+#endif
+
+		mixerOutput_left_FFT.SetFromMix({ w_AbirW_left_FFT, x_AbirX_left_FFT, y_AbirY_left_FFT });
+		mixerOutput_right_FFT.SetFromMix({ w_AbirW_right_FFT, x_AbirX_right_FFT, y_AbirY_right_FFT });
+
+		////////////////////////////////////////
+		// FFT-1 Going back to the time domain
+		////////////////////////////////////////
+
+		//TODO All this could be parallelized
+
+#ifdef USE_FREQUENCY_COVOLUTION_WITHOUT_PARTITIONS_REVERB
+		outputLeft.CalculateIFFT_OLA(mixerOutput_left_FFT, mixerOutput_left);
+		outputRight.CalculateIFFT_OLA(mixerOutput_right_FFT, mixerOutput_right);
+#else
+		//Left channel
+		Common::CFprocessor::CalculateIFFT(mixerOutput_left_FFT, ouputBuffer_temp);
+		//We are left only with the final half of the result
+		int halfsize = (int)(ouputBuffer_temp.size() * 0.5f);
+
+		CMonoBuffer<float> temp_OutputBlockLeft(ouputBuffer_temp.begin() + halfsize, ouputBuffer_temp.end());
+		mixerOutput_left = std::move(temp_OutputBlockLeft);			//To use in C++11
+
+		//Right channel
+		ouputBuffer_temp.clear();
+		Common::CFprocessor::CalculateIFFT(mixerOutput_right_FFT, ouputBuffer_temp);
+		//We are left only with the final half of the result
+		halfsize = (int)(ouputBuffer_temp.size() * 0.5f);
+		CMonoBuffer<float> temp_OutputBlockRight(ouputBuffer_temp.begin() + halfsize, ouputBuffer_temp.end());
+		mixerOutput_right = std::move(temp_OutputBlockRight);			//To use in C++11
+#endif
+
+		//////////////////////////////////////////////
+		// Mix of channels decoded after convolution
+		//////////////////////////////////////////////
+
+		//Interlace	TODO Use the method in bufferClass??
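+		// (Keeping only the second half of each IFFT frame above is the discard step of overlap-save;
+		// the first half carries circular-convolution wrap-around and is thrown away.)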
+ for (int i = 0; i < mixerOutput_left.size(); i++) {
+ outBufferLeft.push_back(mixerOutput_left[i]);
+ outBufferRight.push_back(mixerOutput_right[i]);
+ }
+
+
+#ifdef USE_PROFILER_Environment
+ PROFILER3DTI.RelativeSampleEnd(dsEnvInvFFT);
+#endif
+ //////////////////////////////////////////////////////////
+ // TO DO: REVERBERATION DELAY
+ //////////////////////////////////////////////////////////
+
+ //////////////////////////////////////////////////////////
+ // HA Directionality in reverb path
+ //////////////////////////////////////////////////////////
+ if (ownerCore->GetListener()->IsDirectionalityEnabled(Common::T_ear::LEFT)) {
+ ProcessDirectionality(outBufferLeft, ownerCore->GetListener()->GetReverbDirectionalityAttenuation_dB(Common::T_ear::LEFT));
+ }
+ if (ownerCore->GetListener()->IsDirectionalityEnabled(Common::T_ear::RIGHT)) {
+ ProcessDirectionality(outBufferRight, ownerCore->GetListener()->GetReverbDirectionalityAttenuation_dB(Common::T_ear::RIGHT));
+ }
+
+ // WATCHER
+ WATCH(WV_ENVIRONMENT_OUTPUT_LEFT, outBufferLeft, CMonoBuffer);
+ WATCH(WV_ENVIRONMENT_OUTPUT_RIGHT, outBufferRight, CMonoBuffer);
+ }
+ void CEnvironment::ProcessVirtualAmbisonicReverbThreedimensional(CMonoBuffer & outBufferLeft, CMonoBuffer & outBufferRight)
+ {
+ CMonoBuffer w, x, y, z; // B-Format data
+ CMonoBuffer w_AbirW_left_FFT;
+ CMonoBuffer w_AbirW_right_FFT;
+ CMonoBuffer x_AbirX_left_FFT;
+ CMonoBuffer x_AbirX_right_FFT;
+ CMonoBuffer y_AbirY_left_FFT;
+ CMonoBuffer y_AbirY_right_FFT;
+ CMonoBuffer z_AbirZ_left_FFT;
+ CMonoBuffer z_AbirZ_right_FFT;
+ CMonoBuffer mixerOutput_left_FFT;
+ CMonoBuffer mixerOutput_right_FFT;
+ CMonoBuffer mixerOutput_left;
+ CMonoBuffer mixerOutput_right;
+ CMonoBuffer ouputBuffer_temp;
+
+ float WScale = 0.707107f;
+
+ // We assume all buffers have the same number of samples
+ size_t samplesInBuffer = ownerCore->GetAudioState().bufferSize;
+
+ /////////////////////////////////////////
+ // 1-st Order Virtual Ambisonics Encoder
+ /////////////////////////////////////////
+#ifdef USE_PROFILER_Environment
+ PROFILER3DTI.RelativeSampleStart(dsEnvEncoder);
+#endif
+
+ // Init summation for B-Format channels
+ w.Fill(samplesInBuffer, 0.0f);
+ x.Fill(samplesInBuffer, 0.0f);
+ y.Fill(samplesInBuffer, 0.0f);
+ z.Fill(samplesInBuffer, 0.0f);
+
+
+ // Go through each source
+ //for (int nSource = 0; nSource < ownerCore->audioSources.size(); nSource++)
+ for (auto eachSource : ownerCore->audioSources)
+ {
+ // Check source flags for reverb process
+ if (!eachSource->IsReverbProcessEnabled())
+ continue;
+ if (!eachSource->IsReverbProcessReady())
+ {
+ SET_RESULT(RESULT_WARNING, "Attempt to do reverb process without updating source buffer; please call SetBuffer before ProcessVirtualAmbisonicReverb.");
+ continue;
+ }
+
+ // Get azimuth, elevation and distance from listener to each source
+ // We precompute everything, to minimize per-sample computations.
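+ // First-order (B-format) encoding coefficients used below (a sketch, assuming
+ // the toolkit's listener-relative azimuth/elevation convention in radians):
+ //   W = S / sqrt(2)   (WScale = 0.707107f)
+ //   X = S * cos(az) * cos(el)
+ //   Y = S * sin(az) * cos(el)
+ //   Z = S * sin(el)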
+ Common::CTransform sourceTransform = eachSource->GetSourceTransform();
+ Common::CVector3 vectorToSource = ownerCore->GetListener()->GetListenerTransform().GetVectorTo(sourceTransform);
+ float sourceAzimuth = vectorToSource.GetAzimuthRadians();
+ float sourceElevation = vectorToSource.GetElevationRadians();
+ float sourceDistance = vectorToSource.GetDistance();
+ float cosAzimuth = std::cos(sourceAzimuth);
+ float sinAzimuth = std::sin(sourceAzimuth);
+ float sinElevation = std::sin(sourceElevation);
+ float cosElevation = std::cos(sourceElevation);
+ float cosAcosE = cosAzimuth * cosElevation;
+ float sinAcosE = sinAzimuth * cosElevation;
+ CMonoBuffer sourceBuffer = eachSource->GetBuffer();
+ //ASSERT(sourceBuffer.size() > 0, RESULT_ERROR_NOTSET, "Attempt to process virtual ambisonics reverb without previously feeding audio source buffers", "");
+
+ //Apply Distance Attenuation
+ float distanceAttenuation_ReverbConstant = ownerCore->GetMagnitudes().GetReverbDistanceAttenuation();
+ if (eachSource->IsDistanceAttenuationEnabledReverb()) {
+ eachSource->distanceAttenuatorReverb.Process(sourceBuffer, sourceDistance, distanceAttenuation_ReverbConstant, ownerCore->GetAudioState().bufferSize, ownerCore->GetAudioState().sampleRate);
+ }
+ // Go through each sample
+ for (int nSample = 0; nSample < samplesInBuffer; nSample++)
+ {
+ // Value from the input buffer
+ float newSample = sourceBuffer[nSample];
+
+ // Add partial contribution of this source to each B-format channel
+ w[nSample] += newSample * WScale;
+ x[nSample] += newSample * cosAcosE;
+ z[nSample] += newSample * sinElevation;
+ y[nSample] += newSample * sinAcosE;
+ }
+
+ // Set flag for reverb process
+ eachSource->SetReverbProcessNotReady();
+ }
+
+#ifdef USE_PROFILER_Environment
+ PROFILER3DTI.RelativeSampleEnd(dsEnvEncoder);
+#endif
+ ///////////////////////////////////////////
+ // Frequency-Domain Convolution with ABIR
+ ///////////////////////////////////////////
+#ifdef USE_PROFILER_Environment
+ PROFILER3DTI.RelativeSampleStart(dsEnvConvolver);
+#endif
+ //TODO All this could be parallelized
+
+
+#ifdef USE_FREQUENCY_COVOLUTION_WITHOUT_PARTITIONS_REVERB
+ ///////
+ // W //
+ ///////
+ CMonoBuffer w_FFT;
+ //Make FFT of W
+ Common::CFprocessor::GetFFT(w, w_FFT, environmentABIR.GetDataLength());
+ Common::CFprocessor::ComplexMultiplication(w_FFT, GetABIR().GetImpulseResponse(TBFormatChannel::W, T_ear::LEFT), w_AbirW_left_FFT);
+ Common::CFprocessor::ComplexMultiplication(w_FFT, GetABIR().GetImpulseResponse(TBFormatChannel::W, T_ear::RIGHT), w_AbirW_right_FFT);
+
+ ///////
+ // X //
+ ///////
+ CMonoBuffer x_FFT;
+ //Make FFT of X
+ Common::CFprocessor::GetFFT(x, x_FFT, environmentABIR.GetDataLength());
+ //Complex Product
+ Common::CFprocessor::ComplexMultiplication(x_FFT, GetABIR().GetImpulseResponse(X, T_ear::LEFT), x_AbirX_left_FFT);
+ Common::CFprocessor::ComplexMultiplication(x_FFT, GetABIR().GetImpulseResponse(X, T_ear::RIGHT), x_AbirX_right_FFT);
+
+ ///////
+ // Y //
+ ///////
+ CMonoBuffer y_FFT;
+ //TBFormatChannelData abirY = GetABIR().GetChannelData(Y);
+ //Make FFT of Y
+ Common::CFprocessor::GetFFT(y, y_FFT, environmentABIR.GetDataLength());
+ //Complex Product
+ Common::CFprocessor::ComplexMultiplication(y_FFT, GetABIR().GetImpulseResponse(Y, T_ear::LEFT), y_AbirY_left_FFT);
+ Common::CFprocessor::ComplexMultiplication(y_FFT, GetABIR().GetImpulseResponse(Y, T_ear::RIGHT), y_AbirY_right_FFT);
+
+ ///////
+ // Z //
+ ///////
+ CMonoBuffer z_FFT;
+ //Make FFT of Z
+ Common::CFprocessor::GetFFT(z, z_FFT, environmentABIR.GetDataLength());
+ //Complex Product (Z channel, needed for the 3D mix below)
+ Common::CFprocessor::ComplexMultiplication(z_FFT, GetABIR().GetImpulseResponse(Z, T_ear::LEFT), z_AbirZ_left_FFT);
+ Common::CFprocessor::ComplexMultiplication(z_FFT, GetABIR().GetImpulseResponse(Z, T_ear::RIGHT), z_AbirZ_right_FFT);
+#else
+
+ ///Apply UPC algorithm
+ wLeft_UPConvolution.ProcessUPConvolution_withoutIFFT(w, 
GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::W, Common::T_ear::LEFT), w_AbirW_left_FFT);
+ wRight_UPConvolution.ProcessUPConvolution_withoutIFFT(w, GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::W, Common::T_ear::RIGHT), w_AbirW_right_FFT);
+ xLeft_UPConvolution.ProcessUPConvolution_withoutIFFT(x, GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::X, Common::T_ear::LEFT), x_AbirX_left_FFT);
+ xRight_UPConvolution.ProcessUPConvolution_withoutIFFT(x, GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::X, Common::T_ear::RIGHT), x_AbirX_right_FFT);
+ yLeft_UPConvolution.ProcessUPConvolution_withoutIFFT(y, GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::Y, Common::T_ear::LEFT), y_AbirY_left_FFT);
+ yRight_UPConvolution.ProcessUPConvolution_withoutIFFT(y, GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::Y, Common::T_ear::RIGHT), y_AbirY_right_FFT);
+ zLeft_UPConvolution.ProcessUPConvolution_withoutIFFT(z, GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::Z, Common::T_ear::LEFT), z_AbirZ_left_FFT);
+ zRight_UPConvolution.ProcessUPConvolution_withoutIFFT(z, GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::Z, Common::T_ear::RIGHT), z_AbirZ_right_FFT);
+
+#endif
+
+
+#ifdef USE_PROFILER_Environment
+ PROFILER3DTI.RelativeSampleEnd(dsEnvConvolver);
+#endif
+
+ ///////////////////////////////////////
+ // Mix of channels in Frequency domain
+ ///////////////////////////////////////
+
+#ifdef USE_PROFILER_Environment
+ PROFILER3DTI.RelativeSampleStart(dsEnvInvFFT);
+#endif
+
+ mixerOutput_left_FFT.SetFromMix({ w_AbirW_left_FFT, x_AbirX_left_FFT, y_AbirY_left_FFT, z_AbirZ_left_FFT });
+ mixerOutput_right_FFT.SetFromMix({ w_AbirW_right_FFT, x_AbirX_right_FFT, y_AbirY_right_FFT, z_AbirZ_right_FFT });
+
+ ////////////////////////////////////////
+ // FFT-1 Going back to the time domain
+ ////////////////////////////////////////
+
+ //TODO All this could be parallelized
+
+
+#ifdef USE_FREQUENCY_COVOLUTION_WITHOUT_PARTITIONS_REVERB
+ outputLeft.CalculateIFFT_OLA(mixerOutput_left_FFT, mixerOutput_left);
+ outputRight.CalculateIFFT_OLA(mixerOutput_right_FFT, mixerOutput_right);
+#else
+ //Left channel
+ Common::CFprocessor::CalculateIFFT(mixerOutput_left_FFT, ouputBuffer_temp);
+ //We are left only with the final half of the result
+ int halfsize = (int)(ouputBuffer_temp.size() * 0.5f);
+
+ CMonoBuffer temp_OutputBlockLeft(ouputBuffer_temp.begin() + halfsize, ouputBuffer_temp.end());
+ mixerOutput_left = std::move(temp_OutputBlockLeft); //To use in C++11
+
+ //Right channel
+ ouputBuffer_temp.clear();
+ Common::CFprocessor::CalculateIFFT(mixerOutput_right_FFT, ouputBuffer_temp);
+ //We are left only with the final half of the result
+ halfsize = (int)(ouputBuffer_temp.size() * 0.5f);
+ CMonoBuffer temp_OutputBlockRight(ouputBuffer_temp.begin() + halfsize, ouputBuffer_temp.end());
+ mixerOutput_right = std::move(temp_OutputBlockRight); //To use in C++11
+#endif
+
+ //////////////////////////////////////////////
+ // Mix of channels decoded after convolution
+ //////////////////////////////////////////////
+
+ //Interlace TODO Use the method in bufferClass??
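+ // Note that this step only appends each decoded channel to its own mono
+ // buffer; actual interlacing into a stereo buffer is done later, e.g. by the
+ // CStereoBuffer overload of ProcessVirtualAmbisonicReverb.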
+ for (int i = 0; i < mixerOutput_left.size(); i++) {
+ outBufferLeft.push_back(mixerOutput_left[i]);
+ outBufferRight.push_back(mixerOutput_right[i]);
+ }
+
+
+#ifdef USE_PROFILER_Environment
+ PROFILER3DTI.RelativeSampleEnd(dsEnvInvFFT);
+#endif
+ //////////////////////////////////////////////////////////
+ // TO DO: REVERBERATION DELAY
+ //////////////////////////////////////////////////////////
+
+ //////////////////////////////////////////////////////////
+ // HA Directionality in reverb path
+ //////////////////////////////////////////////////////////
+ if (ownerCore->GetListener()->IsDirectionalityEnabled(Common::T_ear::LEFT)) {
+ ProcessDirectionality(outBufferLeft, ownerCore->GetListener()->GetReverbDirectionalityAttenuation_dB(Common::T_ear::LEFT));
+ }
+ if (ownerCore->GetListener()->IsDirectionalityEnabled(Common::T_ear::RIGHT)) {
+ ProcessDirectionality(outBufferRight, ownerCore->GetListener()->GetReverbDirectionalityAttenuation_dB(Common::T_ear::RIGHT));
+ }
+
+ // WATCHER
+ WATCH(WV_ENVIRONMENT_OUTPUT_LEFT, outBufferLeft, CMonoBuffer);
+ WATCH(WV_ENVIRONMENT_OUTPUT_RIGHT, outBufferRight, CMonoBuffer);
+ }
+
+ // Process virtual ambisonic reverb for specified buffers
+ void CEnvironment::ProcessVirtualAmbisonicReverb(CMonoBuffer & outBufferLeft, CMonoBuffer & outBufferRight)
+ {
+ if (!environmentABIR.IsInitialized())
+ {
+ SET_RESULT(RESULT_ERROR_NOTINITIALIZED, "Data is not ready to be processed");
+ return;
+ }
+
+ // This would crash if there are no sources created. Rather than reporting an error, do nothing
+ if (ownerCore->audioSources.size() == 0)
+ return;
+
+ switch (reverberationOrder) {
+ case TReverberationOrder::BIDIMENSIONAL:
+ ProcessVirtualAmbisonicReverbBidimensional(outBufferLeft, outBufferRight);
+ break;
+ case TReverberationOrder::THREEDIMENSIONAL:
+ ProcessVirtualAmbisonicReverbThreedimensional(outBufferLeft, outBufferRight);
+ break;
+ case TReverberationOrder::ADIMENSIONAL:
+ ProcessVirtualAmbisonicReverbAdimensional(outBufferLeft, outBufferRight);
+ break;
+ }
+ }
+
+ void CEnvironment::ProcessDirectionality(CMonoBuffer &buffer, float directionalityAttenuation)
 {
- // error handler: Trust in called methods for setting result
+ buffer.ApplyGain(directionalityAttenuation);
+ }
+
+ // Process virtual ambisonic reverb for specified buffers
+ void CEnvironment::ProcessVirtualAmbisonicReverb(CStereoBuffer & outBuffer)
+ {
+ CMonoBuffer outLeftBuffer;
+ CMonoBuffer outRightBuffer;
+ ProcessVirtualAmbisonicReverb(outLeftBuffer, outRightBuffer);
+ outBuffer.Interlace(outLeftBuffer, outRightBuffer);
+ }
+//////////////////////////////////////////////
+
+ void CEnvironment::ProcessEncodedChannelReverbThreedimensional(TBFormatChannel channel, CMonoBuffer encoderIn, CMonoBuffer & output)
+ {
 CMonoBuffer channel_FFT;
 CMonoBuffer Convolution_left_FFT;
 CMonoBuffer Convolution_right_FFT;
+ // Inverse FFT: Back to time domain
 CMonoBuffer leftOutputBuffer;
 CMonoBuffer rightOutputBuffer;
-
+
+#ifdef USE_FREQUENCY_COVOLUTION_WITHOUT_PARTITIONS_REVERB
+
+ //Make FFT and frequency convolution
+ Common::CFprocessor::GetFFT(encoderIn, channel_FFT, environmentABIR.GetDataLength());
+ Common::CFprocessor::ComplexMultiplication(channel_FFT, GetABIR().GetImpulseResponse(channel, T_ear::RIGHT), Convolution_right_FFT);
+ Common::CFprocessor::ComplexMultiplication(channel_FFT, GetABIR().GetImpulseResponse(channel, T_ear::LEFT), Convolution_left_FFT);
+ //FFT Inverse
+ outputLeft.CalculateIFFT_OLA(Convolution_left_FFT, leftOutputBuffer);
+ 
outputRight.CalculateIFFT_OLA(Convolution_right_FFT, rightOutputBuffer); +#else + ///UPC Convolution + if (channel == TBFormatChannel::W) + { + wLeft_UPConvolution.ProcessUPConvolution(encoderIn, GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::W, Common::T_ear::LEFT), leftOutputBuffer); + wRight_UPConvolution.ProcessUPConvolution(encoderIn, GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::W, Common::T_ear::RIGHT), rightOutputBuffer); + } + else if (channel == TBFormatChannel::X) + { + xLeft_UPConvolution.ProcessUPConvolution(encoderIn, GetABIR().GetImpulseResponse_Partitioned(X, Common::T_ear::LEFT), leftOutputBuffer); + xRight_UPConvolution.ProcessUPConvolution(encoderIn, GetABIR().GetImpulseResponse_Partitioned(X, Common::T_ear::RIGHT), rightOutputBuffer); + } + else if (channel == TBFormatChannel::Y) + { + yLeft_UPConvolution.ProcessUPConvolution(encoderIn, GetABIR().GetImpulseResponse_Partitioned(Y, Common::T_ear::LEFT), leftOutputBuffer); + yRight_UPConvolution.ProcessUPConvolution(encoderIn, GetABIR().GetImpulseResponse_Partitioned(Y, Common::T_ear::RIGHT), rightOutputBuffer); + } + else if (channel == TBFormatChannel::Z) { + zLeft_UPConvolution.ProcessUPConvolution(encoderIn, GetABIR().GetImpulseResponse_Partitioned(Z, Common::T_ear::LEFT), leftOutputBuffer); + zRight_UPConvolution.ProcessUPConvolution(encoderIn, GetABIR().GetImpulseResponse_Partitioned(Z, Common::T_ear::RIGHT), rightOutputBuffer); + } + else { + //Error + } + // Build Stereo buffer + output.FromTwoMonosToStereo(leftOutputBuffer, rightOutputBuffer); +#endif + } + + void CEnvironment::ProcessEncodedChannelReverbBidimensional(TBFormatChannel channel, CMonoBuffer encoderIn, CMonoBuffer & output) + { + CMonoBuffer channel_FFT; + CMonoBuffer Convolution_left_FFT; + CMonoBuffer Convolution_right_FFT; + + // Inverse FFT: Back to time domain + CMonoBuffer leftOutputBuffer; + CMonoBuffer rightOutputBuffer; + #ifdef USE_FREQUENCY_COVOLUTION_WITHOUT_PARTITIONS_REVERB //Make FFT and frequency convolution @@ -521,27 +1339,74 @@ namespace Binaural { #else ///UPC Convolution if (channel == TBFormatChannel::W) - { - wLeft_UPConvolution.ProcessUPConvolution (encoderIn, GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::W, Common::T_ear::LEFT), leftOutputBuffer); - wRight_UPConvolution.ProcessUPConvolution(encoderIn, GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::W, Common::T_ear::RIGHT),rightOutputBuffer); + { + wLeft_UPConvolution.ProcessUPConvolution(encoderIn, GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::W, Common::T_ear::LEFT), leftOutputBuffer); + wRight_UPConvolution.ProcessUPConvolution(encoderIn, GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::W, Common::T_ear::RIGHT), rightOutputBuffer); } - else if (channel == TBFormatChannel::X) + else if (channel == TBFormatChannel::X) { - xLeft_UPConvolution.ProcessUPConvolution (encoderIn, GetABIR().GetImpulseResponse_Partitioned(X, Common::T_ear::LEFT), leftOutputBuffer); - xRight_UPConvolution.ProcessUPConvolution(encoderIn, GetABIR().GetImpulseResponse_Partitioned(X, Common::T_ear::RIGHT),rightOutputBuffer); + xLeft_UPConvolution.ProcessUPConvolution(encoderIn, GetABIR().GetImpulseResponse_Partitioned(X, Common::T_ear::LEFT), leftOutputBuffer); + xRight_UPConvolution.ProcessUPConvolution(encoderIn, GetABIR().GetImpulseResponse_Partitioned(X, Common::T_ear::RIGHT), rightOutputBuffer); } - else if (channel == TBFormatChannel::Y) + else if (channel == TBFormatChannel::Y) { - yLeft_UPConvolution.ProcessUPConvolution (encoderIn, 
GetABIR().GetImpulseResponse_Partitioned(Y, Common::T_ear::LEFT), leftOutputBuffer); - yRight_UPConvolution.ProcessUPConvolution(encoderIn, GetABIR().GetImpulseResponse_Partitioned(Y, Common::T_ear::RIGHT),rightOutputBuffer); + yLeft_UPConvolution.ProcessUPConvolution(encoderIn, GetABIR().GetImpulseResponse_Partitioned(Y, Common::T_ear::LEFT), leftOutputBuffer); + yRight_UPConvolution.ProcessUPConvolution(encoderIn, GetABIR().GetImpulseResponse_Partitioned(Y, Common::T_ear::RIGHT), rightOutputBuffer); } else { //Error } + // Build Stereo buffer + output.FromTwoMonosToStereo(leftOutputBuffer, rightOutputBuffer); #endif + } + + void CEnvironment::ProcessEncodedChannelReverbAdimensional(TBFormatChannel channel, CMonoBuffer encoderIn, CMonoBuffer & output) + { + CMonoBuffer channel_FFT; + CMonoBuffer Convolution_left_FFT; + CMonoBuffer Convolution_right_FFT; + + // Inverse FFT: Back to time domain + CMonoBuffer leftOutputBuffer; + CMonoBuffer rightOutputBuffer; + + if (channel == TBFormatChannel::W) + { + wLeft_UPConvolution.ProcessUPConvolution(encoderIn, GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::W, Common::T_ear::LEFT), leftOutputBuffer); + wRight_UPConvolution.ProcessUPConvolution(encoderIn, GetABIR().GetImpulseResponse_Partitioned(TBFormatChannel::W, Common::T_ear::RIGHT), rightOutputBuffer); + } + else { /* Error */ } // Build Stereo buffer - output.FromTwoMonosToStereo(leftOutputBuffer, rightOutputBuffer); + output.FromTwoMonosToStereo(leftOutputBuffer, rightOutputBuffer); + } + + // Process reverb for one b-format channel encoded with 1st order ambisonics (useful for some wrappers) + void CEnvironment::ProcessEncodedChannelReverb(TBFormatChannel channel, CMonoBuffer encoderIn, CMonoBuffer & output) + { + + // error handler: Trust in called methods for setting result + switch (reverberationOrder) { + case TReverberationOrder::BIDIMENSIONAL: + ProcessEncodedChannelReverbBidimensional(channel, encoderIn, output); + + break; + case TReverberationOrder::THREEDIMENSIONAL: + ProcessEncodedChannelReverbThreedimensional(channel, encoderIn, output); + + break; + case TReverberationOrder::ADIMENSIONAL: + ProcessEncodedChannelReverbAdimensional(channel, encoderIn, output); + break; + + } + + } + + void CEnvironment::SetReverberationOrder(TReverberationOrder order) + { + reverberationOrder = order; } //brief Calculate the BRIR again diff --git a/3dti_Toolkit/BinauralSpatializer/Environment.h b/3dti_Toolkit/BinauralSpatializer/Environment.h index 6aad8eae..8c0b3586 100644 --- a/3dti_Toolkit/BinauralSpatializer/Environment.h +++ b/3dti_Toolkit/BinauralSpatializer/Environment.h @@ -39,8 +39,11 @@ enum VirtualSpeakerPosition { SOUTH, ///< SPK3 (south) EAST, ///< SPK4 (east) WEST, ///< SPK2 (west) + ZENIT, ///< SPK (zenit) + NADIR ///< SPK (nadir) }; +enum TReverberationOrder { ADIMENSIONAL, BIDIMENSIONAL, THREEDIMENSIONAL }; namespace Binaural { @@ -83,9 +86,10 @@ namespace Binaural { void ResetReverbBuffers(); /** \brief Configure AIR class (with the partitioned impulse responses) using BRIR data for the UPC algorithm + * \retval boolean to indicate if calculation was successful * \eh Nothing is reported to the error handler. */ - void CalculateABIRPartitioned(); + bool CalculateABIRPartitioned(); /** \brief Configure AIR class using BRIR data for the basic convolution algorithm * \eh Nothing is reported to the error handler. 
@@ -118,10 +122,43 @@ namespace Binaural {
*/
void ProcessEncodedChannelReverb(TBFormatChannel channel, CMonoBuffer encoderIn, CMonoBuffer & output);
+ /** \brief Configures the number of channels of the first-order ambisonic reverb processing
+ * \details The options are: W, X, Y and Z (3D); W, X and Y (2D); only W (0D)
+ * \param [in] order TReverberationOrder enum with order option
+ * \eh Nothing is reported to the error handler.
+ */
+ void SetReverberationOrder(TReverberationOrder order);
+
+ /** \brief Gets the enum value that configures the number of channels of the first-order ambisonic reverb processing
+ * \details The options are: W, X, Y and Z (3D); W, X and Y (2D); only W (0D)
+ * \retval TReverberationOrder enum with order option
+ * \eh Nothing is reported to the error handler.
+ */
+ TReverberationOrder GetReverberationOrder();
private:
-
+
+ //Processes virtual ambisonic reverb in each reverberation order configuration
+ void ProcessVirtualAmbisonicReverbAdimensional(CMonoBuffer & outBufferLeft, CMonoBuffer & outBufferRight);
+ void ProcessVirtualAmbisonicReverbBidimensional(CMonoBuffer & outBufferLeft, CMonoBuffer & outBufferRight);
+ void ProcessVirtualAmbisonicReverbThreedimensional(CMonoBuffer & outBufferLeft, CMonoBuffer & outBufferRight);
+
+ //Processes an encoded reverb channel in each reverberation order configuration. TODO: make unique function
+ void ProcessEncodedChannelReverbThreedimensional(TBFormatChannel channel, CMonoBuffer encoderIn, CMonoBuffer & output);
+ void ProcessEncodedChannelReverbBidimensional(TBFormatChannel channel, CMonoBuffer encoderIn, CMonoBuffer & output);
+ void ProcessEncodedChannelReverbAdimensional(TBFormatChannel channel, CMonoBuffer encoderIn, CMonoBuffer & output);
+
+ //Calculates partitioned ABIR in each reverberation order configuration
+ bool CalculateABIRPartitionedAdimensional();
+ bool CalculateABIRPartitionedBidimensional();
+ bool CalculateABIRPartitionedThreedimensional();
+
+ //Sets ABIR in each reverberation order configuration
+ void SetABIRAdimensional(int bufferLength, int blockLengthFreq, int numberOfBlocks);
+ void SetABIRBidimensional(int bufferLength, int blockLengthFreq, int numberOfBlocks);
+ void SetABIRThreedimensional(int bufferLength, int blockLengthFreq, int numberOfBlocks);
+ // Set ABIR of environment. Create AIR class using ambisonic codification. 
Also, initialize convolution buffers - void SetABIR(); + bool SetABIR(); // Calculate the BRIR again void CalculateBRIR(); // Apply the directionality to simulate the hearing aid device @@ -141,16 +178,21 @@ namespace Binaural { Common::CUPCEnvironment wLeft_UPConvolution; //Buffers to perform Uniformly Partitioned Convolution Common::CUPCEnvironment xLeft_UPConvolution; //Buffers to perform Uniformly Partitioned Convolution Common::CUPCEnvironment yLeft_UPConvolution; //Buffers to perform Uniformly Partitioned Convolution + Common::CUPCEnvironment zLeft_UPConvolution; //Buffers to perform Uniformly Partitioned Convolution Common::CUPCEnvironment wRight_UPConvolution; //Buffers to perform Uniformly Partitioned Convolution Common::CUPCEnvironment xRight_UPConvolution; //Buffers to perform Uniformly Partitioned Convolution Common::CUPCEnvironment yRight_UPConvolution; //Buffers to perform Uniformly Partitioned Convolution + Common::CUPCEnvironment zRight_UPConvolution; //Buffers to perform Uniformly Partitioned Convolution + #endif int HADirectionality_LeftChannel_version; //HA Directionality left version int HADirectionality_RightChannel_version; //HA Directionality right version + TReverberationOrder reverberationOrder; + friend class CCore; //Friend class definition friend class CBRIR; }; } -#endif \ No newline at end of file +#endif diff --git a/3dti_Toolkit/BinauralSpatializer/HRTF.cpp b/3dti_Toolkit/BinauralSpatializer/HRTF.cpp index b6c9cade..000323bc 100644 --- a/3dti_Toolkit/BinauralSpatializer/HRTF.cpp +++ b/3dti_Toolkit/BinauralSpatializer/HRTF.cpp @@ -320,8 +320,8 @@ namespace Binaural { if (runTimeInterpolation) { - if (AreSame(_azimuth, sphereBorder, epsilon_sewing)) {_azimuth = 0.0f; } - if (AreSame(_elevation, sphereBorder, epsilon_sewing)) { _elevation = 0.0f;} + if (Common::CMagnitudes::AreSame(_azimuth, sphereBorder, epsilon_sewing)) {_azimuth = 0.0f; } + if (Common::CMagnitudes::AreSame(_elevation, sphereBorder, epsilon_sewing)) { _elevation = 0.0f;} //If we are in the sphere poles, do not perform the interpolation (the HRIR value for this orientations have been calculated with a different method in the resampled methods, because our barycentric interpolation method doesn't work in the poles) int iazimuth = static_cast(round(_azimuth)); @@ -390,7 +390,7 @@ namespace Binaural SET_RESULT(RESULT_ERROR_NOTSET, "GetHRIR_partitioned: HRTF Setup in progress return empty"); } SET_RESULT(RESULT_WARNING, "GetHRIR_partitioned return empty"); - + return *new std::vector>(); }//END GetHRIR_partitioned float CHRTF::GetHRIRDelay(Common::T_ear ear, float _azimuthCenter, float _elevationCenter, bool runTimeInterpolation) @@ -413,8 +413,8 @@ namespace Binaural { if (runTimeInterpolation) { - if (AreSame(_azimuthCenter, sphereBorder, epsilon_sewing)) { _azimuthCenter = 0.0f; } - if (AreSame(_elevationCenter, sphereBorder, epsilon_sewing)) { _elevationCenter = 0.0f; } + if (Common::CMagnitudes::AreSame(_azimuthCenter, sphereBorder, epsilon_sewing)) { _azimuthCenter = 0.0f; } + if (Common::CMagnitudes::AreSame(_elevationCenter, sphereBorder, epsilon_sewing)) { _elevationCenter = 0.0f; } //If we are in the sphere poles, do not perform the interpolation (the HRIR value for this orientations have been calculated with a different method in the resampled methods, because our barycentric interpolation method doesn't work in the poles) int iazimuth = static_cast(round(_azimuthCenter)); @@ -1455,28 +1455,5 @@ namespace Binaural } - const bool CHRTF::AreSame(float a, float b, float epsilon) const - { 
- //return fabs(a - b) < epsilon_sewing;
- float absA = fabs(a);
- float absB = fabs(b);
- float diff = fabs(a - b);
-
- return diff < epsilon;
-
- //if (a == b) { // shortcut, handles infinities
- // return true;
- //}
- //else if (a == 0 || b == 0 || diff < std::numberic_limits::min()) {
- // // a or b is zero or both are extremely close to it
- // // relative error is less meaningful here
- // return diff < (epsilon * std::numberic_limits::min());
- //}
- //else { // use relative error
- // return diff / fmin((absA + absB), std::numberic_limits::max()) < epsilon;
- //}
- }
-
-
}//END namespace
diff --git a/3dti_Toolkit/BinauralSpatializer/HRTF.h b/3dti_Toolkit/BinauralSpatializer/HRTF.h
index 155f8d85..25533032 100644
--- a/3dti_Toolkit/BinauralSpatializer/HRTF.h
+++ b/3dti_Toolkit/BinauralSpatializer/HRTF.h
@@ -407,9 +407,6 @@ namespace Binaural
// Reset HRTF
void Reset();
- //Compare two float values
- const bool AreSame(float a, float b, float epsilon) const;
-
friend class CListener;
};
diff --git a/3dti_Toolkit/BinauralSpatializer/Listener.cpp b/3dti_Toolkit/BinauralSpatializer/Listener.cpp
index 926c9f08..a168a733 100644
--- a/3dti_Toolkit/BinauralSpatializer/Listener.cpp
+++ b/3dti_Toolkit/BinauralSpatializer/Listener.cpp
@@ -24,7 +24,6 @@
#include
#include
-#define MINIMUMDISTANCETOSOURCE 0.1f
#define ILDATTENUATION -6.0f
#define NUM_STEPS_TO_INTEGRATE_CARDIOID_FOR_REVERB 100
@@ -34,7 +33,6 @@ namespace Binaural
CListener::CListener(CCore* _ownerCore, float _listenerHeadRadius)
:ownerCore{_ownerCore},
listenerHeadRadius{_listenerHeadRadius},
- listenerMinimumDistanceToSource{ MINIMUMDISTANCETOSOURCE },
listenerILDAttenutationDB{ ILDATTENUATION },
enableDirectionality {false, false},
anechoicDirectionalityAttenuation{0.0f, 0.0f},
@@ -165,11 +163,6 @@ namespace Binaural
if (listenerHRTF!=nullptr){ listenerHRTF->CalculateNewHRTFTable(); }
}
- //Get the minimum distance between the listener and any source
- float CListener::GetMinimumDistanceToSource()
- {
- return listenerMinimumDistanceToSource;
- }
//Get CMagnitudes instance
Common::CMagnitudes CListener::GetCoreMagnitudes() const
@@ -278,7 +271,10 @@ namespace Binaural
float CListener::CalculateDirectionalityAttenuation( float directionalityExtend, float angleToForwardAxis_rad)
{
- return (-1.0f * directionalityExtend * std::sin( angleToForwardAxis_rad / 2.0f ));
+ if (directionalityExtend > 30) directionalityExtend = 30.0f; // Clamp the directionality extend to 30 dB
+ float directionalityFactor = 0.5f - 0.5f * std::pow(10, -directionalityExtend / 20); // Cardioid mixing factor derived from the dB extend
+ float directionalityAttenuation = 1 - directionalityFactor + (directionalityFactor)* std::cos(angleToForwardAxis_rad); // Cardioid-like gain: 1 at the front, 10^(-extend/20) at the back
+ return (20 * std::log10(directionalityAttenuation)); // Convert the linear gain back to dB
}
int CListener::GetHRTFResamplingStep() const
diff --git a/3dti_Toolkit/BinauralSpatializer/Listener.h b/3dti_Toolkit/BinauralSpatializer/Listener.h
index acc0d0ff..c573f238 100644
--- a/3dti_Toolkit/BinauralSpatializer/Listener.h
+++ b/3dti_Toolkit/BinauralSpatializer/Listener.h
@@ -140,12 +140,6 @@ class CHRTF;
*/
Common::TAudioStateStruct GetCoreAudioState() const;
- /** \brief Get the minimum allowed distance between the listener and any source
- * \retval distance value in meters that stores the minimum allowed distance between the listener and any source
- * \eh Nothing is reported to the error handler.
- */
- float GetMinimumDistanceToSource();
-
/** \brief Get CMagnitudes instance from owner Core
* \retval magnitudes magnitudes object
* \eh Nothing is reported to the error handler. 
@@ -231,7 +225,6 @@ class CHRTF; Common::CTransform listenerTransform; // Transform matrix (position and orientation) of listener float listenerHeadRadius; // Head radius of listener - float listenerMinimumDistanceToSource; // Minimum distante between the listener and the source float listenerILDAttenutationDB; // Attenuation to apply when the ILD is in use (HighPerformance) diff --git a/3dti_Toolkit/BinauralSpatializer/SingleSourceDSP.cpp b/3dti_Toolkit/BinauralSpatializer/SingleSourceDSP.cpp index 501a69d6..6a8eaf9d 100644 --- a/3dti_Toolkit/BinauralSpatializer/SingleSourceDSP.cpp +++ b/3dti_Toolkit/BinauralSpatializer/SingleSourceDSP.cpp @@ -242,7 +242,6 @@ namespace Binaural { { ASSERT(_inBuffer.size() == ownerCore->GetAudioState().bufferSize, RESULT_ERROR_BADSIZE, "InBuffer size has to be equal to the input size indicated by the Core::SetAudioState method", ""); - // Check process flag if (!enableAnechoic) { @@ -269,7 +268,15 @@ namespace Binaural { { CMonoBuffer inBuffer = _inBuffer; //We have to copy input buffer to a new buffer because the distance effects methods work changing the input buffer - // Apply Far distance effect + //Check if the source is in the same position as the listener head. If yes, do not apply spatialization + if (distanceToListener <= ownerCore->GetListener()->GetHeadRadius()) + { + outLeftBuffer = inBuffer; + outRightBuffer = inBuffer; + return; + } + + //Apply Far distance effect if (IsFarDistanceEffectEnabled()) { ProcessFarDistanceEffect(inBuffer, distanceToListener); } // Apply distance attenuation @@ -316,27 +323,43 @@ namespace Binaural { // Calculates the values returned by GetEarAzimuth and GetEarElevation void CSingleSourceDSP::CalculateSourceCoordinates() { - // Keep source outside listener head - Common::CTransform safeSourceTransform = CalculateTransformPositionWithRestrictions(sourceTransform); //Get azimuth and elevation between listener and source - vectorToListener = ownerCore->GetListener()->GetListenerTransform().GetVectorTo(safeSourceTransform); - Common::CVector3 leftVectorTo = ownerCore->GetListener()->GetListenerEarTransform(Common::T_ear::LEFT).GetVectorTo(safeSourceTransform); - Common::CVector3 rightVectorTo = ownerCore->GetListener()->GetListenerEarTransform(Common::T_ear::RIGHT).GetVectorTo(safeSourceTransform); + vectorToListener = ownerCore->GetListener()->GetListenerTransform().GetVectorTo(sourceTransform); + + distanceToListener = vectorToListener.GetDistance(); + + //Check listener and source are in the same position + if (distanceToListener <= EPSILON ) { + return; + } + + Common::CVector3 leftVectorTo = ownerCore->GetListener()->GetListenerEarTransform(Common::T_ear::LEFT).GetVectorTo(sourceTransform); + Common::CVector3 rightVectorTo = ownerCore->GetListener()->GetListenerEarTransform(Common::T_ear::RIGHT).GetVectorTo(sourceTransform); Common::CVector3 leftVectorTo_sphereProjection = GetSphereProjectionPosition(leftVectorTo, ownerCore->GetListener()->GetListenerEarLocalPosition(Common::T_ear::LEFT), ownerCore->GetListener()->GetHRTF()->GetHRTFDistanceOfMeasurement()); Common::CVector3 rightVectorTo_sphereProjection = GetSphereProjectionPosition(rightVectorTo, ownerCore->GetListener()->GetListenerEarLocalPosition(Common::T_ear::RIGHT), ownerCore->GetListener()->GetHRTF()->GetHRTFDistanceOfMeasurement()); - leftAzimuth = leftVectorTo_sphereProjection.GetAzimuthDegrees(); //Get left azimuth - leftElevation = leftVectorTo_sphereProjection.GetElevationDegrees(); //Get left elevation - - rightAzimuth = 
rightVectorTo_sphereProjection.GetAzimuthDegrees(); //Get right azimuth + leftElevation = leftVectorTo_sphereProjection.GetElevationDegrees(); //Get left elevation + if (!Common::CMagnitudes::AreSame(ELEVATION_SINGULAR_POINT_UP, leftElevation, EPSILON) && !Common::CMagnitudes::AreSame(ELEVATION_SINGULAR_POINT_DOWN, leftElevation, EPSILON)) + { + leftAzimuth = leftVectorTo_sphereProjection.GetAzimuthDegrees(); //Get left azimuth + } + rightElevation = rightVectorTo_sphereProjection.GetElevationDegrees(); //Get right elevation + if (!Common::CMagnitudes::AreSame(ELEVATION_SINGULAR_POINT_UP, rightElevation, EPSILON) && !Common::CMagnitudes::AreSame(ELEVATION_SINGULAR_POINT_DOWN, rightElevation, EPSILON)) + { + rightAzimuth = rightVectorTo_sphereProjection.GetAzimuthDegrees(); //Get right azimuth + } - distanceToListener = vectorToListener.GetDistance(); //Get Distance - interauralAzimuth = vectorToListener.GetInterauralAzimuthDegrees(); //Get Interaural Azimuth - centerAzimuth = vectorToListener.GetAzimuthDegrees(); //Get azimuth from the head center centerElevation = vectorToListener.GetElevationDegrees(); //Get elevation from the head center + if (!Common::CMagnitudes::AreSame(ELEVATION_SINGULAR_POINT_UP, centerElevation, EPSILON) && !Common::CMagnitudes::AreSame(ELEVATION_SINGULAR_POINT_DOWN, centerElevation, EPSILON)) + { + centerAzimuth = vectorToListener.GetAzimuthDegrees(); //Get azimuth from the head center + } + + interauralAzimuth = vectorToListener.GetInterauralAzimuthDegrees(); //Get Interaural Azimuth + } // Returns the azimuth of the specified ear. @@ -346,8 +369,11 @@ namespace Binaural { return leftAzimuth; else if ( ear == Common::T_ear::RIGHT ) return rightAzimuth; - else - SET_RESULT(RESULT_ERROR_INVALID_PARAM, "Call to CSingleSourceDSP::GetEarAzimuth with invalid param" ); + else + { + SET_RESULT(RESULT_ERROR_INVALID_PARAM, "Call to CSingleSourceDSP::GetEarAzimuth with invalid param" ); + return 0.0f; + } } // Returns the elevation of the specified ear @@ -357,8 +383,11 @@ namespace Binaural { return leftElevation; else if (ear == Common::T_ear::RIGHT) return rightElevation; - else - SET_RESULT( RESULT_ERROR_INVALID_PARAM, "Call to CSingleSourceDSP::GetEarAzimuth with invalid param" ); + else + { + SET_RESULT( RESULT_ERROR_INVALID_PARAM, "Call to CSingleSourceDSP::GetEarElevation with invalid param" ); + return 0.0f; + } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -658,47 +687,6 @@ namespace Binaural { #endif } - ///Calculate a new source transform position taking into account the restrictions, source that can not be inside of the listener head - Common::CTransform CSingleSourceDSP::CalculateTransformPositionWithRestrictions(Common::CTransform sourceTransform) - { - float sourceMinimumDistance = 0.1f; //Default value, just in case - if (ownerCore!=nullptr){ - if (ownerCore->GetListener() != nullptr) { - - //Get the minimum distance possible - sourceMinimumDistance = ownerCore->GetListener()->GetMinimumDistanceToSource(); - //Get distance between listener and source - Common::CVector3 vectorToSource = ownerCore->GetListener()->GetListenerTransform().GetVectorTo(sourceTransform); - float sourceDistance = vectorToSource.GetDistance(); // OPTIMIZATION: we could use GetSqrDistance instead - //Check if the distance is less than the minumun - if (sourceDistance < sourceMinimumDistance) - { - //How to project a point on to a sphere - //Let p be the point (source position), s the sphere's centre 
(listener position) and r the radius (minimum distance)
- //then x = s + r*(p-s)/(norm(p-s)) where x is the point projected (new source position)
- //Get s (listner position)
- Common::CTransform listernerTransform = ownerCore->GetListener()->GetListenerTransform();
- Common::CVector3 listenerPosition = listernerTransform.GetPosition();
- if (sourceDistance != 0.0f) {
- //Get projectionVector vector
- float scale = sourceMinimumDistance / sourceDistance;
- lastSourceProjectionVector = Common::CVector3(scale * vectorToSource.x, scale * vectorToSource.y, scale * vectorToSource.z);
- }
- //New position
- Common::CVector3 newSourcePosition = listenerPosition + lastSourceProjectionVector;
- //Create a return the new source transform
- Common::CTransform newSourceTransform;
- newSourceTransform.SetPosition(newSourcePosition);
- return newSourceTransform;
- }
- else
- {
- return sourceTransform;
- }
- }
- }
- return sourceTransform;
- }
///Return the flag which tells if the buffer is updated and ready for a new anechoic process
bool CSingleSourceDSP::IsAnechoicProcessReady() { return readyForAnechoic; }
diff --git a/3dti_Toolkit/BinauralSpatializer/SingleSourceDSP.h b/3dti_Toolkit/BinauralSpatializer/SingleSourceDSP.h
index 43e7be26..9660fea1 100644
--- a/3dti_Toolkit/BinauralSpatializer/SingleSourceDSP.h
+++ b/3dti_Toolkit/BinauralSpatializer/SingleSourceDSP.h
@@ -36,6 +36,9 @@
#include
//#define USE_UPC_WITHOUT_MEMORY
+#define EPSILON 0.0001f
+#define ELEVATION_SINGULAR_POINT_UP 90.0
+#define ELEVATION_SINGULAR_POINT_DOWN 270.0
namespace Binaural {
@@ -300,8 +303,6 @@ namespace Binaural {
void ProcessAddDelay_ExpansionMethod(CMonoBuffer& input, CMonoBuffer& output, CMonoBuffer& delayBuffer, int newDelay);
// Reset source convolution buffers
void ResetSourceConvolutionBuffers(shared_ptr listener);
- // Calculate a new source transform position taking into account the restrictions, source that can not be inside of the listener head
- Common::CTransform CalculateTransformPositionWithRestrictions(Common::CTransform newSourceTransform);
// return the flag which tells if the buffer is updated and ready for a new anechoic process
bool IsAnechoicProcessReady();
// return the flag which tells if the buffer is updated and ready for a new reverb process
diff --git a/3dti_Toolkit/Common/AIR.h b/3dti_Toolkit/Common/AIR.h
index 5b7deb1e..b3d7ffea 100644
--- a/3dti_Toolkit/Common/AIR.h
+++ b/3dti_Toolkit/Common/AIR.h
@@ -24,7 +24,7 @@
#define _CAIR_H_
#include
-#include
+#include
#include
#include
#include
diff --git a/3dti_Toolkit/Common/Conventions.h b/3dti_Toolkit/Common/Conventions.h
index 4b4a14f6..ae0b8ed4 100644
--- a/3dti_Toolkit/Common/Conventions.h
+++ b/3dti_Toolkit/Common/Conventions.h
@@ -76,6 +76,8 @@ typedef int TCircularMotion; ///< Type definition for defining spherical motion
#define UP_AXIS AXIS_Y ///< In Unity 5.x, Y is the UP direction
#define RIGHT_AXIS AXIS_X ///< In Unity 5.x, X is the RIGHT direction
#define FORWARD_AXIS AXIS_Z ///< In Unity 5.x, Z is the FORWARD direction
+ #define LEFT_AXIS AXIS_MINUS_X ///< In Unity 5.x, -X is the LEFT direction
+
#elif defined ( _3DTI_AXIS_CONVENTION_OPENFRAMEWORK )
#define UP_AXIS AXIS_MINUS_Z ///< In Open Framework test apps, -Z is the UP direction
diff --git a/3dti_Toolkit/Common/FarDistanceEffects.cpp b/3dti_Toolkit/Common/FarDistanceEffects.cpp
index 5138f9f8..ce4e5ceb 100644
--- a/3dti_Toolkit/Common/FarDistanceEffects.cpp
+++ b/3dti_Toolkit/Common/FarDistanceEffects.cpp
@@ -29,7 +29,7 @@
// Q for LPF
#define LPF_Q 1.414213562
-#define 
NUM_OF_BIQUAD_FILTERS_FOR_FAR_DISTANCE_FILTERING 1
+#define NUM_OF_BIQUAD_FILTERS_FOR_FAR_DISTANCE_FILTERING 2
// The default function that provides the cutoff frequency that models the distortion of far sound sources
// follows this expression: Fc = A · 10^-B(distance - C)
@@ -103,12 +103,20 @@ namespace Common {
if (distance > DISTANCE_MODEL_THRESHOLD_FAR)
{
+ /*
// See comments in definition of constants CUT_OFF_FREQUENCY_FUNCTION__COEF_X
float A = CUT_OFF_FREQUENCY_FUNCTION__COEF_A;
float B = CUT_OFF_FREQUENCY_FUNCTION__COEF_B;
float C = CUT_OFF_FREQUENCY_FUNCTION__COEF_C;
- return A * std::pow(10, -B * (distance - C));
+ return A * std::pow(10, -B * (distance - C));*/
+
+ float c_pow = 2.0f, c_div = 7100.0f, ax = 100.0f, dmin = 15.0f, dmax = 100.0f; // Model constants (ax is currently unused)
+ float b = exp(pow(dmax - dmin, c_pow) / c_div); // Normalization so that the cutoff is 20 kHz at dmin
+ if (distance > dmax) distance = dmax; // Beyond dmax the cutoff stays at its minimum
+ return ((20000 / b) * exp((pow(dmax - distance, c_pow)) / c_div)); // Cutoff decays from 20 kHz (at dmin) down to 20000/b Hz (at dmax)
+
+
}
else
return NO_FILTERING_CUT_OFF_FREQUENCY;
diff --git a/3dti_Toolkit/Common/Magnitudes.cpp b/3dti_Toolkit/Common/Magnitudes.cpp
index 9273474e..4f35575d 100644
--- a/3dti_Toolkit/Common/Magnitudes.cpp
+++ b/3dti_Toolkit/Common/Magnitudes.cpp
@@ -23,7 +23,7 @@
#include
#include
-#include
+#include
#define DEFAULT_REVERB_ATTENUATION_DB -3.01f ///< Default reverb attenuation with distance, in decibels
#define DEFAULT_ANECHOIC_ATTENUATION_DB -6.0206f ///< log10f(0.5f) * 20.0f Default anechoic attenuation with distance, in decibels
@@ -112,4 +112,14 @@ namespace Common {
{
return reverbAttenuationDB;
}
+
+ bool CMagnitudes::AreSame(float a, float b, float epsilon)
+ {
+ float absA = fabs(a);
+ float absB = fabs(b);
+ float diff = fabs(a - b);
+
+ return diff < epsilon;
+ }
+
}//end namespace Common
\ No newline at end of file
diff --git a/3dti_Toolkit/Common/Magnitudes.h b/3dti_Toolkit/Common/Magnitudes.h
index d2b1ac40..331fad75 100644
--- a/3dti_Toolkit/Common/Magnitudes.h
+++ b/3dti_Toolkit/Common/Magnitudes.h
@@ -85,6 +85,12 @@ namespace Common {
*/
float GetSoundSpeed() const;
+ /** \brief Compare two float values
+ * \retval true if both values are equal within the given epsilon margin
+ * \eh Nothing is reported to the error handler.
+ */
+ static bool AreSame(float a, float b, float epsilon);
+
// ATTRIBUTES:
private:
float anechoicAttenuationDB; // Constant for modeling the attenuation due to distance in anechoic process, in decibel units
diff --git a/3dti_Toolkit/HAHLSimulation/HearingLossSim.cpp b/3dti_Toolkit/HAHLSimulation/HearingLossSim.cpp
index 4da96027..c2df7762 100644
--- a/3dti_Toolkit/HAHLSimulation/HearingLossSim.cpp
+++ b/3dti_Toolkit/HAHLSimulation/HearingLossSim.cpp
@@ -86,8 +86,8 @@ namespace HAHLSimulation {
void CHearingLossSim::SetHearingLevel_dBHL(Common::T_ear ear, int bandIndex, float hearingLevel_dBHL)
{
- if (hearingLevel_dBHL > 100)
- hearingLevel_dBHL = 100;
+ if (hearingLevel_dBHL > 99)
+ hearingLevel_dBHL = 99;
// Check band index
ASSERT((bandIndex >= 0) && (bandIndex < audiometries.left.size()), RESULT_ERROR_OUTOFRANGE, "Attempt to set hearing level for a wrong band number", "Band for hearing level is correct");
@@ -347,8 +347,8 @@ namespace HAHLSimulation {
float CHearingLossSim::CalculateThresholdFromDBHL(float dBHL)
{
float limitedDBHL = dBHL;
- if (limitedDBHL > 160.0f)
- limitedDBHL = 160.0f;
+ if (limitedDBHL > 120.0f)
+ limitedDBHL = 120.0f;
return T100 - A100 + (A100*limitedDBHL) * 0.01f;
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5ea116e6..b4d47cb5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,44 @@ All notable changes to the 3DTuneIn Toolkit will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/).
+## [M20180705] - AudioToolkit_v1.1_20180705
+
+### Binaural
+
+`Removed`
+ - Removed function CListener::GetMinimumDistanceToSource
+ * old: float CListener::GetMinimumDistanceToSource()
+
+`Changed`
+ - Modified CBRIR::AddBRIR to be a method that returns a boolean value indicating if the BRIR has been added correctly
+ * old: ~~void~~ CBRIR::AddBRIR(VirtualSpeakerPosition vsPosition, Common::T_ear vsChannel, TImpulseResponse && newBRIR)
+ * new: **bool** CBRIR::AddBRIR(VirtualSpeakerPosition vsPosition, Common::T_ear vsChannel, TImpulseResponse && newBRIR)
+ - Modified CBRIR::EndSetup to be a method that returns a boolean value indicating if setup has been successful
+ * old: ~~void~~ CBRIR::EndSetup()
+ * new: **bool** CBRIR::EndSetup()
+ - Modified CEnvironment::CalculateABIRPartitioned to be a method that returns a boolean value indicating if ABIR has been calculated correctly
+ * old: ~~void~~ CEnvironment::CalculateABIRPartitioned()
+ * new: **bool** CEnvironment::CalculateABIRPartitioned()
+ - Modified far distance effect, which now uses a different cutoff frequency calculation and low-pass filtering
+ - Modified directionality attenuation calculation
+
+`Added`
+ - New enumeration type TReverberationOrder in CEnvironment
+ * enumerators: ADIMENSIONAL (to only process the W channel), BIDIMENSIONAL (to only process the X, Y and W channels), THREEDIMENSIONAL (to process the X, Y, Z and W channels)
+ - New method to set the reverberation order in CEnvironment (see the usage sketch below)
+ * new: void CEnvironment::SetReverberationOrder(TReverberationOrder order)
+ - New boolean function to know if a partitioned impulse response is empty
+ * new: bool CBRIR::IsIREmpty(const TImpulseResponse_Partitioned& in)
+ - New function to get the reverberation order in CEnvironment
+ * new: TReverberationOrder CEnvironment::GetReverberationOrder()
+ - New static boolean function CMagnitudes::AreSame to know if two float values (a and b) have the same value within a margin specified by epsilon
+ * new: static bool CMagnitudes::AreSame(float a, float b, float epsilon)
+
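+ - A minimal usage sketch of the reverberation-order API (the `environment` pointer is illustrative):
+
+ ```cpp
+ // Process only the W, X and Y channels (2D reverb)
+ environment->SetReverberationOrder(BIDIMENSIONAL);
+ if (environment->GetReverberationOrder() == BIDIMENSIONAL)
+ {
+     // The new boolean returns allow checking that BRIR setup succeeded
+     bool setupOK = environment->GetBRIR()->EndSetup();
+ }
+ ```
+
+### HAHLSimulation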
+`Changed`
+ - Modified audiometry limits: hearing level is now clamped to 99 dBHL, and the maximum level used when calculating thresholds is 120 dBHL (previously 160 dBHL)
+
+
 ## [M20180319] - AudioToolkit_v1.0_20180319
 
 ### Binaural