From 5c230c53a255e1b92f939c8e63ce2341f9e8cb7f Mon Sep 17 00:00:00 2001 From: Martijn Laarman Date: Fri, 5 Dec 2014 14:51:22 +0100 Subject: [PATCH] fix #1080 keep track off timeout over multiple delegated calls into cluster Conflicts: src/Elasticsearch.Net/Connection/Configuration/IConnectionConfigurationValues.cs src/Elasticsearch.Net/Connection/RequestHandlers/RequestHandlerBase.cs src/Tests/Elasticsearch.Net.Tests.Unit/Elasticsearch.Net.Tests.Unit.csproj --- .../Configuration/ConnectionConfiguration.cs | 16 ++ .../IConnectionConfigurationValues.cs | 20 +++ .../Connection/ITransportDelegator.cs | 7 + .../RequestHandlers/RequestHandlerBase.cs | 55 ++++--- .../RequestState/ITransportRequestState.cs | 1 + src/Elasticsearch.Net/Connection/Transport.cs | 24 ++- .../SniffingConnectionPoolTests.cs | 2 +- .../Elasticsearch.Net.Tests.Unit.csproj | 2 + .../DontRetryAfterDefaultTimeoutTests.cs | 141 +++++++++++++++++ .../DontRetryAfterMaxRetryTimeoutTests.cs | 143 ++++++++++++++++++ 10 files changed, 391 insertions(+), 20 deletions(-) create mode 100644 src/Tests/Elasticsearch.Net.Tests.Unit/Failover/Timeout/DontRetryAfterDefaultTimeoutTests.cs create mode 100644 src/Tests/Elasticsearch.Net.Tests.Unit/Failover/Timeout/DontRetryAfterMaxRetryTimeoutTests.cs diff --git a/src/Elasticsearch.Net/Connection/Configuration/ConnectionConfiguration.cs b/src/Elasticsearch.Net/Connection/Configuration/ConnectionConfiguration.cs index 5c3244329e2..d1da359822a 100644 --- a/src/Elasticsearch.Net/Connection/Configuration/ConnectionConfiguration.cs +++ b/src/Elasticsearch.Net/Connection/Configuration/ConnectionConfiguration.cs @@ -61,6 +61,9 @@ public class ConnectionConfiguration : IConnectionConfigurationValues, IHideO private int? _maxDeadTimeout; int? IConnectionConfigurationValues.MaxDeadTimeout { get{ return _maxDeadTimeout; } } + private TimeSpan? _maxRetryTimeout; + TimeSpan? IConnectionConfigurationValues.MaxRetryTimeout { get{ return _maxRetryTimeout; } } + private string _proxyUsername; string IConnectionConfigurationValues.ProxyUsername { get{ return _proxyUsername; } } @@ -278,6 +281,19 @@ public T SetMaxDeadTimeout(int timeout) this._maxDeadTimeout = timeout; return (T) this; } + + /// + /// Limits the total runtime including retries separately from + ///
+		/// When not specified this defaults to the Timeout setting, which itself defaults to 60 seconds
+		/// 
+ ///
+ public T SetMaxRetryTimeout(TimeSpan maxRetryTimeout) + { + this._maxRetryTimeout = maxRetryTimeout; + return (T) this; + } + /// /// Semaphore asynchronous connections automatically by giving /// it a maximum concurrent connections. diff --git a/src/Elasticsearch.Net/Connection/Configuration/IConnectionConfigurationValues.cs b/src/Elasticsearch.Net/Connection/Configuration/IConnectionConfigurationValues.cs index 3e8a05917ba..be8f5d68256 100644 --- a/src/Elasticsearch.Net/Connection/Configuration/IConnectionConfigurationValues.cs +++ b/src/Elasticsearch.Net/Connection/Configuration/IConnectionConfigurationValues.cs @@ -6,16 +6,36 @@ namespace Elasticsearch.Net.Connection { + //TODO change timeouts to TimeSpans in 2.0? + public interface IConnectionConfigurationValues { IConnectionPool ConnectionPool { get; } int MaximumAsyncConnections { get; } int Timeout { get; } + + /// + /// The timeout in milliseconds to use for ping calls that are issues to check whether a node is up or not. + /// int? PingTimeout { get; } + int? DeadTimeout { get; } int? MaxDeadTimeout { get; } int? MaxRetries { get; } + + /// + /// Limits the total runtime including retries separately from + ///
+		/// When not specified this defaults to the Timeout setting, which itself defaults to 60 seconds
+		/// 
+ ///
+ TimeSpan? MaxRetryTimeout { get; } + + /// + /// This signals that we do not want to send initial pings to unknown/previously dead nodes + /// and just send the call straightaway + /// bool DisablePings { get; } bool EnableCompressedResponses { get; } diff --git a/src/Elasticsearch.Net/Connection/ITransportDelegator.cs b/src/Elasticsearch.Net/Connection/ITransportDelegator.cs index fb91ffb5234..56fd2c0fad2 100644 --- a/src/Elasticsearch.Net/Connection/ITransportDelegator.cs +++ b/src/Elasticsearch.Net/Connection/ITransportDelegator.cs @@ -26,6 +26,13 @@ internal interface ITransportDelegator bool SniffingDisabled(IRequestConfiguration requestConfiguration); bool SniffOnFaultDiscoveredMoreNodes(ITransportRequestState requestState, int retried, ElasticsearchResponse streamResponse); + + /// + /// Returns whether the current delegation over nodes took too long and we should quit. + /// if is set we'll use that timeout otherwise we default to th value of + /// which itself defaults to 60 seconds + /// + bool TookTooLongToRetry(ITransportRequestState requestState); /// /// Selects next node uri on request state diff --git a/src/Elasticsearch.Net/Connection/RequestHandlers/RequestHandlerBase.cs b/src/Elasticsearch.Net/Connection/RequestHandlers/RequestHandlerBase.cs index 1764e589925..14059656f85 100644 --- a/src/Elasticsearch.Net/Connection/RequestHandlers/RequestHandlerBase.cs +++ b/src/Elasticsearch.Net/Connection/RequestHandlers/RequestHandlerBase.cs @@ -16,6 +16,7 @@ internal class RequestHandlerBase { protected const int BufferSize = 4096; protected static readonly string MaxRetryExceptionMessage = "Failed after retrying {2} times: '{0} {1}'. {3}"; + protected static readonly string TookTooLongExceptionMessage = "Retry timeout {4} was hit after retrying {2} times: '{0} {1}'. {3}"; protected static readonly string MaxRetryInnerMessage = "InnerException: {0}, InnerMessage: {1}, InnerStackTrace: {2}"; protected readonly IConnectionConfigurationValues _settings; @@ -123,39 +124,59 @@ protected bool DoneProcessing( protected void ThrowMaxRetryExceptionWhenNeeded(TransportRequestState requestState, int maxRetries) { - if (requestState.Retried < maxRetries) return; + var tookToLong = this._delegator.TookTooLongToRetry(requestState); + + //not out of date and we havent depleted our retries, get the hell out of here + if (!tookToLong && requestState.Retried < maxRetries) return; + var innerExceptions = requestState.SeenExceptions.Where(e => e != null).ToList(); var innerException = !innerExceptions.HasAny() ? null : (innerExceptions.Count() == 1) ? innerExceptions.First() : new AggregateException(requestState.SeenExceptions); - var exceptionMessage = CreateMaxRetryExceptionMessage(requestState, innerException); + + //When we are not using pooling we forcefully rethrow the exception + //and never wrap it in a maxretry exception + if (!requestState.UsingPooling && innerException != null) + throw innerException; + + var exceptionMessage = tookToLong + ? 
CreateTookTooLongExceptionMessage(requestState, innerException) + : CreateMaxRetryExceptionMessage(requestState, innerException); throw new MaxRetryException(exceptionMessage, innerException); } + protected string CreateInnerExceptionMessage(TransportRequestState requestState, Exception e) + { + if (e == null) return null; + var aggregate = e as AggregateException; + if (aggregate == null) + return "\r\n" + MaxRetryInnerMessage.F(e.GetType().Name, e.Message, e.StackTrace); + aggregate = aggregate.Flatten(); + var innerExceptions = aggregate.InnerExceptions + .Select(ae => MaxRetryInnerMessage.F(ae.GetType().Name, ae.Message, ae.StackTrace)) + .ToList(); + return "\r\n" + string.Join("\r\n", innerExceptions); + } + protected string CreateMaxRetryExceptionMessage(TransportRequestState requestState, Exception e) { - string innerException = null; - if (e != null) - { - var aggregate = e as AggregateException; - if (aggregate != null) - { - aggregate = aggregate.Flatten(); - var innerExceptions = aggregate.InnerExceptions - .Select(ae => MaxRetryInnerMessage.F(ae.GetType().Name, ae.Message, ae.StackTrace)) - .ToList(); - innerException = "\r\n" + string.Join("\r\n", innerExceptions); - } - else - innerException = "\r\n" + MaxRetryInnerMessage.F(e.GetType().Name, e.Message, e.StackTrace); - } + string innerException = CreateInnerExceptionMessage(requestState, e); var exceptionMessage = MaxRetryExceptionMessage .F(requestState.Method, requestState.Path, requestState.Retried, innerException); return exceptionMessage; } + protected string CreateTookTooLongExceptionMessage(TransportRequestState requestState, Exception e) + { + string innerException = CreateInnerExceptionMessage(requestState, e); + var timeout = this._settings.MaxRetryTimeout.GetValueOrDefault(TimeSpan.FromMilliseconds(this._settings.Timeout)); + var exceptionMessage = TookTooLongExceptionMessage + .F(requestState.Method, requestState.Path, requestState.Retried, innerException, timeout); + return exceptionMessage; + } + protected void OptionallyCloseResponseStreamAndSetSuccess( ITransportRequestState requestState, ElasticsearchServerError error, diff --git a/src/Elasticsearch.Net/Connection/RequestState/ITransportRequestState.cs b/src/Elasticsearch.Net/Connection/RequestState/ITransportRequestState.cs index dc7d58e4643..5ac41a4a28f 100644 --- a/src/Elasticsearch.Net/Connection/RequestState/ITransportRequestState.cs +++ b/src/Elasticsearch.Net/Connection/RequestState/ITransportRequestState.cs @@ -11,6 +11,7 @@ public interface ITransportRequestState Uri CreatePathOnCurrentNode(string path); IRequestConfiguration RequestConfiguration { get; } int Retried { get; } + DateTime StartedOn { get; } bool SniffedOnConnectionFailure { get; set; } int? 
Seed { get; set; } Uri CurrentNode { get; set; } diff --git a/src/Elasticsearch.Net/Connection/Transport.cs b/src/Elasticsearch.Net/Connection/Transport.cs index 1a67052e9d0..676218fa48f 100644 --- a/src/Elasticsearch.Net/Connection/Transport.cs +++ b/src/Elasticsearch.Net/Connection/Transport.cs @@ -99,7 +99,7 @@ bool ITransportDelegator.Ping(ITransportRequestState requestState) using (response.Response) return response.Success; } - catch(ElasticsearchAuthenticationException) + catch (ElasticsearchAuthenticationException) { throw; } @@ -188,7 +188,7 @@ IList ITransportDelegator.Sniff(ITransportRequestState ownerState = null) } if (response.HttpStatusCode.HasValue && response.HttpStatusCode == (int)HttpStatusCode.Unauthorized) throw new ElasticsearchAuthenticationException(response); - if (response.Response == null) + if (response.Response == null) return null; using (response.Response) @@ -246,6 +246,26 @@ void ITransportDelegator.SniffOnConnectionFailure(ITransportRequestState request /* REQUEST STATE *** ********************************************/ + /// + /// Returns whether the current delegation over nodes took too long and we should quit. + /// if is set we'll use that timeout otherwise we default to th value of + /// which itself defaults to 60 seconds + /// + bool ITransportDelegator.TookTooLongToRetry(ITransportRequestState requestState) + { + var timeout = this.Settings.MaxRetryTimeout.GetValueOrDefault(TimeSpan.FromMilliseconds(this.Settings.Timeout)); + var startedOn = requestState.StartedOn; + var now = this._dateTimeProvider.Now(); + + //we apply a soft margin so that if a request timesout at 59 seconds when the maximum is 60 + //we also abort. + var margin = (timeout.TotalMilliseconds / 100.0) * 98; + var marginTimeSpan = TimeSpan.FromMilliseconds(margin); + var timespanCall = (now - startedOn); + var tookToLong = timespanCall >= marginTimeSpan; + return tookToLong; + } + /// /// Returns either the fixed maximum set on the connection configuration settings or the number of nodes /// diff --git a/src/Tests/Elasticsearch.Net.Tests.Unit/ConnectionPools/SniffingConnectionPoolTests.cs b/src/Tests/Elasticsearch.Net.Tests.Unit/ConnectionPools/SniffingConnectionPoolTests.cs index 79730c7d77f..3a8c7392ba2 100644 --- a/src/Tests/Elasticsearch.Net.Tests.Unit/ConnectionPools/SniffingConnectionPoolTests.cs +++ b/src/Tests/Elasticsearch.Net.Tests.Unit/ConnectionPools/SniffingConnectionPoolTests.cs @@ -184,7 +184,7 @@ public void SniffOnConnectionFaultCausesSniffOn503() Assert.Throws(()=>client1.Info()); //info call 5 sniffCall.MustHaveHappened(Repeated.Exactly.Once); - nowCall.MustHaveHappened(Repeated.Exactly.Times(8)); + nowCall.MustHaveHappened(Repeated.Exactly.Times(10)); } } diff --git a/src/Tests/Elasticsearch.Net.Tests.Unit/Elasticsearch.Net.Tests.Unit.csproj b/src/Tests/Elasticsearch.Net.Tests.Unit/Elasticsearch.Net.Tests.Unit.csproj index 9ed3e8a681b..62b247f57f9 100644 --- a/src/Tests/Elasticsearch.Net.Tests.Unit/Elasticsearch.Net.Tests.Unit.csproj +++ b/src/Tests/Elasticsearch.Net.Tests.Unit/Elasticsearch.Net.Tests.Unit.csproj @@ -62,6 +62,8 @@ + + diff --git a/src/Tests/Elasticsearch.Net.Tests.Unit/Failover/Timeout/DontRetryAfterDefaultTimeoutTests.cs b/src/Tests/Elasticsearch.Net.Tests.Unit/Failover/Timeout/DontRetryAfterDefaultTimeoutTests.cs new file mode 100644 index 00000000000..7fc14c38917 --- /dev/null +++ b/src/Tests/Elasticsearch.Net.Tests.Unit/Failover/Timeout/DontRetryAfterDefaultTimeoutTests.cs @@ -0,0 +1,141 @@ +using System; +using System.Collections.Generic; 
+using System.Linq; +using System.Threading.Tasks; +using Autofac; +using Autofac.Extras.FakeItEasy; +using Elasticsearch.Net.Connection; +using Elasticsearch.Net.Connection.Configuration; +using Elasticsearch.Net.ConnectionPool; +using Elasticsearch.Net.Exceptions; +using Elasticsearch.Net.Providers; +using Elasticsearch.Net.Tests.Unit.Stubs; +using FakeItEasy; +using FluentAssertions; +using NUnit.Framework; + +namespace Elasticsearch.Net.Tests.Unit.Failover.Timeout +{ + [TestFixture] + public class DontRetryAfterDefaultTimeoutTests + { + [Test] + public void FailEarlyIfTimeoutIsExhausted() + { + using (var fake = new AutoFake()) + { + var dateTimeProvider = ProvideDateTimeProvider(fake); + var config = ProvideConfiguration(dateTimeProvider); + var connection = ProvideConnection(fake, config, dateTimeProvider); + + var getCall = FakeCalls.GetSyncCall(fake); + var ok = FakeResponse.Ok(config); + var bad = FakeResponse.Bad(config); + getCall.ReturnsNextFromSequence( + bad, //info 1 - 9204 + bad, //info 2 - 9203 DEAD + ok //info 2 retry - 9202 + ); + + var seenNodes = new List(); + getCall.Invokes((Uri u, IRequestConfiguration o) => seenNodes.Add(u)); + + var pingCall = FakeCalls.PingAtConnectionLevel(fake); + pingCall.Returns(ok); + + var client1 = fake.Resolve(); + + //event though the third node should have returned ok, the first 2 calls took a minute + var e = Assert.Throws(() => client1.Info()); + e.Message.Should() + .StartWith("Retry timeout 00:01:00 was hit after retrying 1 times:"); + + IElasticsearchResponse response = null; + Assert.DoesNotThrow(() => response = client1.Info() ); + response.Should().NotBeNull(); + response.Success.Should().BeTrue(); + + } + } + + [Test] + public void FailEarlyIfTimeoutIsExhausted_Async() + { + using (var fake = new AutoFake()) + { + var dateTimeProvider = ProvideDateTimeProvider(fake); + var config = ProvideConfiguration(dateTimeProvider); + var connection = ProvideConnection(fake, config, dateTimeProvider); + + var getCall = FakeCalls.GetCall(fake); + var ok = Task.FromResult(FakeResponse.Ok(config)); + var bad = Task.FromResult(FakeResponse.Bad(config)); + getCall.ReturnsNextFromSequence( + bad, + bad, + ok + ); + + var seenNodes = new List(); + getCall.Invokes((Uri u, IRequestConfiguration o) => seenNodes.Add(u)); + + var pingCall = FakeCalls.PingAtConnectionLevelAsync(fake); + pingCall.Returns(ok); + + var client1 = fake.Resolve(); + //event though the third node should have returned ok, the first 2 calls took a minute + var e = Assert.Throws(async () => await client1.InfoAsync()); + e.Message.Should() + .StartWith("Retry timeout 00:01:00 was hit after retrying 1 times:"); + + IElasticsearchResponse response = null; + Assert.DoesNotThrow(async () => response = await client1.InfoAsync() ); + response.Should().NotBeNull(); + response.Success.Should().BeTrue(); + } + } + + private static IConnection ProvideConnection(AutoFake fake, ConnectionConfiguration config, IDateTimeProvider dateTimeProvider) + { + fake.Provide(config); + var param = new TypedParameter(typeof(IDateTimeProvider), dateTimeProvider); + var transport = fake.Provide(param); + var connection = fake.Resolve(); + return connection; + } + + private static ConnectionConfiguration ProvideConfiguration(IDateTimeProvider dateTimeProvider) + { + var connectionPool = new StaticConnectionPool(new[] + { + new Uri("http://localhost:9204"), + new Uri("http://localhost:9203"), + new Uri("http://localhost:9202"), + new Uri("http://localhost:9201") + }, randomizeOnStartup: false, 
dateTimeProvider: dateTimeProvider); + var config = new ConnectionConfiguration(connectionPool).EnableMetrics(); + return config; + } + + private static IDateTimeProvider ProvideDateTimeProvider(AutoFake fake) + { + var now = DateTime.UtcNow; + var dateTimeProvider = fake.Resolve(); + var nowCall = A.CallTo(() => dateTimeProvider.Now()); + nowCall.ReturnsNextFromSequence( + now, //initital sniff now from constructor + now, //pool select next node + now.AddSeconds(30), //info 1 took to long? + now.AddSeconds(30), //pool select next node? + now.AddMinutes(1) //info 2 took to long? + ); + A.CallTo(() => dateTimeProvider.AliveTime(A._, A._)).Returns(new DateTime()); + //dead time will return a fixed timeout of 1 minute + A.CallTo(() => dateTimeProvider.DeadTime(A._, A._, A._, A._)) + .Returns(DateTime.UtcNow.AddMinutes(1)); + //make sure the transport layer uses a different datetimeprovider + fake.Provide(new DateTimeProvider()); + return dateTimeProvider; + } + } +} diff --git a/src/Tests/Elasticsearch.Net.Tests.Unit/Failover/Timeout/DontRetryAfterMaxRetryTimeoutTests.cs b/src/Tests/Elasticsearch.Net.Tests.Unit/Failover/Timeout/DontRetryAfterMaxRetryTimeoutTests.cs new file mode 100644 index 00000000000..a6e224642bd --- /dev/null +++ b/src/Tests/Elasticsearch.Net.Tests.Unit/Failover/Timeout/DontRetryAfterMaxRetryTimeoutTests.cs @@ -0,0 +1,143 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using Autofac; +using Autofac.Extras.FakeItEasy; +using Elasticsearch.Net.Connection; +using Elasticsearch.Net.Connection.Configuration; +using Elasticsearch.Net.ConnectionPool; +using Elasticsearch.Net.Exceptions; +using Elasticsearch.Net.Providers; +using Elasticsearch.Net.Tests.Unit.Stubs; +using FakeItEasy; +using FluentAssertions; +using NUnit.Framework; + +namespace Elasticsearch.Net.Tests.Unit.Failover.Timeout +{ + [TestFixture] + public class DontRetryAfterMaxRetryTimeoutTests + { + [Test] + public void FailEarlyIfTimeoutIsExhausted() + { + using (var fake = new AutoFake()) + { + var dateTimeProvider = ProvideDateTimeProvider(fake); + var config = ProvideConfiguration(dateTimeProvider); + var connection = ProvideConnection(fake, config, dateTimeProvider); + + var getCall = FakeCalls.GetSyncCall(fake); + var ok = FakeResponse.Ok(config); + var bad = FakeResponse.Bad(config); + getCall.ReturnsNextFromSequence( + bad, //info 1 - 9204 + bad, //info 2 - 9203 DEAD + ok //info 2 retry - 9202 + ); + + var seenNodes = new List(); + getCall.Invokes((Uri u, IRequestConfiguration o) => seenNodes.Add(u)); + + var pingCall = FakeCalls.PingAtConnectionLevel(fake); + pingCall.Returns(ok); + + var client1 = fake.Resolve(); + + //event though the third node should have returned ok, the first 2 calls took a minute + var e = Assert.Throws(() => client1.Info()); + e.Message.Should() + .StartWith("Retry timeout 00:01:20 was hit after retrying 1 times:"); + + IElasticsearchResponse response = null; + Assert.DoesNotThrow(() => response = client1.Info() ); + response.Should().NotBeNull(); + response.Success.Should().BeTrue(); + + } + } + + [Test] + public void FailEarlyIfTimeoutIsExhausted_Async() + { + using (var fake = new AutoFake()) + { + var dateTimeProvider = ProvideDateTimeProvider(fake); + var config = ProvideConfiguration(dateTimeProvider); + var connection = ProvideConnection(fake, config, dateTimeProvider); + + var getCall = FakeCalls.GetCall(fake); + var ok = Task.FromResult(FakeResponse.Ok(config)); + var bad = Task.FromResult(FakeResponse.Bad(config)); + 
getCall.ReturnsNextFromSequence( + bad, + bad, + ok + ); + + var seenNodes = new List(); + getCall.Invokes((Uri u, IRequestConfiguration o) => seenNodes.Add(u)); + + var pingCall = FakeCalls.PingAtConnectionLevelAsync(fake); + pingCall.Returns(ok); + + var client1 = fake.Resolve(); + //event though the third node should have returned ok, the first 2 calls took a minute + var e = Assert.Throws(async () => await client1.InfoAsync()); + e.Message.Should() + .StartWith("Retry timeout 00:01:20 was hit after retrying 1 times:"); + + IElasticsearchResponse response = null; + Assert.DoesNotThrow(async () => response = await client1.InfoAsync() ); + response.Should().NotBeNull(); + response.Success.Should().BeTrue(); + } + } + + private static IConnection ProvideConnection(AutoFake fake, ConnectionConfiguration config, IDateTimeProvider dateTimeProvider) + { + fake.Provide(config); + var param = new TypedParameter(typeof(IDateTimeProvider), dateTimeProvider); + var transport = fake.Provide(param); + var connection = fake.Resolve(); + return connection; + } + + private static ConnectionConfiguration ProvideConfiguration(IDateTimeProvider dateTimeProvider) + { + var connectionPool = new StaticConnectionPool(new[] + { + new Uri("http://localhost:9204"), + new Uri("http://localhost:9203"), + new Uri("http://localhost:9202"), + new Uri("http://localhost:9201") + }, randomizeOnStartup: false, dateTimeProvider: dateTimeProvider); + var config = new ConnectionConfiguration(connectionPool) + .SetTimeout(20) + .SetMaxRetryTimeout(TimeSpan.FromSeconds(80)); + return config; + } + + private static IDateTimeProvider ProvideDateTimeProvider(AutoFake fake) + { + var now = DateTime.UtcNow; + var dateTimeProvider = fake.Resolve(); + var nowCall = A.CallTo(() => dateTimeProvider.Now()); + nowCall.ReturnsNextFromSequence( + now, //initital sniff now from constructor + now, //pool select next node + now.AddSeconds(30), //info 1 took to long? + now.AddSeconds(30), //pool select next node? + now.AddSeconds(80) //info 2 took to long? + ); + A.CallTo(() => dateTimeProvider.AliveTime(A._, A._)).Returns(new DateTime()); + //dead time will return a fixed timeout of 1 minute + A.CallTo(() => dateTimeProvider.DeadTime(A._, A._, A._, A._)) + .Returns(DateTime.UtcNow.AddMinutes(1)); + //make sure the transport layer uses a different datetimeprovider + fake.Provide(new DateTimeProvider()); + return dateTimeProvider; + } + } +}
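
After this patch, a caller can opt in to the combined retry budget roughly as follows. This is a minimal sketch, not part of the diff: it reuses StaticConnectionPool, SetTimeout, SetMaxRetryTimeout, ElasticsearchClient.Info() and MaxRetryException as exercised by the test fixtures above; the node URIs and the 5 second / 30 second values are illustrative assumptions only.

	using System;
	using Elasticsearch.Net;
	using Elasticsearch.Net.Connection;
	using Elasticsearch.Net.ConnectionPool;
	using Elasticsearch.Net.Exceptions;

	public static class MaxRetryTimeoutUsage
	{
		public static void Main()
		{
			// Illustrative node addresses; substitute the nodes of your own cluster.
			var pool = new StaticConnectionPool(new[]
			{
				new Uri("http://localhost:9200"),
				new Uri("http://localhost:9201")
			});

			var config = new ConnectionConfiguration(pool)
				.SetTimeout(5 * 1000)                          // per-call timeout, in milliseconds
				.SetMaxRetryTimeout(TimeSpan.FromSeconds(30)); // overall budget across all retries

			var client = new ElasticsearchClient(config);
			try
			{
				var response = client.Info();
				Console.WriteLine(response.Success);
			}
			catch (MaxRetryException e)
			{
				// With this patch the message states whether the maximum number of retries
				// or the retry timeout budget was exhausted.
				Console.WriteLine(e.Message);
			}
		}
	}

Note that TookTooLongToRetry applies a soft margin of 98% of the configured budget, so a delegation that has already consumed roughly 29.4 of those 30 seconds stops retrying instead of starting one more call.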