feat: add retry trace id and attempt details into debug log #897 (#909)
jeevatkm authored Nov 13, 2024
1 parent 9e0e070 commit 4225157
Showing 4 changed files with 167 additions and 5 deletions.
4 changes: 4 additions & 0 deletions request.go
@@ -68,6 +68,9 @@ type Request struct {
RetryStrategy RetryStrategyFunc
IsRetryDefaultConditions bool

// RetryTraceID provides a GUID that correlates all attempts of the request when retry count > 0
RetryTraceID string

// Attempt provides insights into no. of attempts
// Resty made.
//
@@ -1260,6 +1263,7 @@ func (r *Request) Execute(method, url string) (res *Response, err error) {
var backoff *backoffWithJitter
if r.RetryCount > 0 && isIdempotent {
backoff = newBackoffWithJitter(r.RetryWaitTime, r.RetryMaxWaitTime)
r.RetryTraceID = newGUID()
}

isInvalidRequestErr := false
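For orientation (this note and sketch are editorial, not part of the diff): with a retry count configured, Execute seeds RetryTraceID once before the retry loop, so every attempt of that request shares one GUID, and the value can also be read back from the response. A minimal usage sketch, assuming the v3 module path resty.dev/v3 and a placeholder URL; whether the request actually retries depends on the retry conditions in effect:

package main

import (
	"fmt"

	"resty.dev/v3" // assumed module path for the v3 development line
)

func main() {
	c := resty.New().SetRetryCount(2)

	// EnableDebug prints the request/response log, which for retried
	// requests now includes the RETRY TRACE ID and ATTEMPT lines.
	res, err := c.R().
		EnableDebug().
		Get("https://httpbin.org/status/500") // placeholder URL
	if err != nil {
		fmt.Println("error:", err)
		return
	}

	// RetryTraceID is only seeded for idempotent requests with a retry
	// count > 0; the same GUID is shared by all attempts of this request.
	fmt.Println("retry trace id:", res.Request.RetryTraceID)
	fmt.Println("attempts:", res.Request.Attempt)
}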
14 changes: 10 additions & 4 deletions retry_test.go
@@ -62,8 +62,10 @@ func TestConditionalGetRequestLevel(t *testing.T) {
})

// Clear the default client.
- c := dcnl()
c, lb := dcldb()

resp, err := c.R().
EnableDebug().
AddRetryCondition(check).
SetRetryCount(1).
SetRetryWaitTime(50*time.Millisecond).
@@ -77,6 +79,7 @@
assertEqual(t, "TestGet: text response", resp.String())
assertEqual(t, 1, resp.Request.Attempt)
assertEqual(t, 1, externalCounter)
assertEqual(t, true, strings.Contains(lb.String(), "RETRY TRACE ID:"))

logResponse(t, resp)
}
@@ -111,8 +114,9 @@ func TestClientRetryWithMinAndMaxWaitTime(t *testing.T) {
retryWaitTime := 10 * time.Millisecond
retryMaxWaitTime := 100 * time.Millisecond

- c := dcnl().
- SetRetryCount(retryCount).
c, lb := dcldb()

c.SetRetryCount(retryCount).
SetRetryWaitTime(retryWaitTime).
SetRetryMaxWaitTime(retryMaxWaitTime).
AddRetryCondition(
@@ -121,13 +125,15 @@
return true
},
)
- res, _ := c.R().Get(ts.URL + "/set-retrywaittime-test")
res, _ := c.R().EnableDebug().Get(ts.URL + "/set-retrywaittime-test")

retryIntervals[res.Request.Attempt-1] = parseTimeSleptFromResponse(res.String())

// retryCount+1 == attempts were made
assertEqual(t, retryCount+1, res.Request.Attempt)

assertEqual(t, true, strings.Contains(lb.String(), "RETRY TRACE ID:"))

// Initial attempt has 0 time slept since last request
assertEqual(t, retryIntervals[0], uint64(0))

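A note on the test changes above (editorial): dcnl() is replaced by dcldb(), which, judging by the lb.String() usage, presumably returns a debug-enabled client together with a buffer that captures its log output, so the tests can assert that the debug log contains the RETRY TRACE ID line.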
98 changes: 97 additions & 1 deletion util.go
@@ -7,6 +7,11 @@ package resty

import (
"bytes"
"crypto/md5"
"crypto/rand"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"log"
@@ -17,6 +22,7 @@ import (
"runtime"
"sort"
"strings"
"sync/atomic"
"time"
)

@@ -348,11 +354,16 @@ func requestDebugLogger(c *Client, r *Request) {
reqLog += "~~~ REQUEST ~~~\n" +
fmt.Sprintf("%s %s %s\n", r.Method, rr.URL.RequestURI(), rr.Proto) +
fmt.Sprintf("HOST : %s\n", rr.URL.Host) +
fmt.Sprintf("ATTEMPT: %d\n", r.Attempt) +
fmt.Sprintf("HEADERS:\n%s\n", composeHeaders(rl.Header)) +
fmt.Sprintf("BODY :\n%v\n", rl.Body) +
"------------------------------------------------------------------------------\n"

if len(r.RetryTraceID) > 0 {
reqLog += fmt.Sprintf("RETRY TRACE ID: %s\n", r.RetryTraceID) +
fmt.Sprintf("ATTEMPT : %d\n", r.Attempt) +
"------------------------------------------------------------------------------\n"
}

r.initValuesMap()
r.values[debugRequestLogKey] = reqLog
}
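Put together (an editorial illustration, not part of the diff), the request section of a retried request's debug log now ends with a block along these lines; the GUID value below is made up:

------------------------------------------------------------------------------
RETRY TRACE ID: 6734c0a1e3b0c44298fc1c14
ATTEMPT : 2
------------------------------------------------------------------------------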
@@ -392,3 +403,88 @@ func responseDebugLogger(c *Client, res *Response) {

res.Request.log.Debugf("%s", debugLog)
}

//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
// GUID generation
// Code inspired from mgo/bson ObjectId
// Code obtained from https://github.com/go-aah/aah/blob/edge/essentials/guid.go
//___________________________________

var (
// guidCounter is atomically incremented when generating a new GUID
// via the newGUID() function. It's used as the counter part of an id.
guidCounter = readRandomUint32()

// machineID stores the machine id, generated once and reused in
// subsequent calls to the newGUID() function.
machineID = readMachineID()

// processID is the current process id.
processID = os.Getpid()
)

// newGUID returns a new Globally Unique Identifier (GUID).
//
// The 12-byte id consists of:
// - a 4-byte value representing the seconds since the Unix epoch,
// - a 3-byte machine identifier,
// - a 2-byte process id, and
// - a 3-byte counter, starting with a random value.
//
// It uses the MongoDB ObjectId algorithm to generate globally unique ids:
// https://docs.mongodb.com/manual/reference/method/ObjectId/
func newGUID() string {
var b [12]byte
// Timestamp, 4 bytes, big endian
binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix()))

// Machine, first 3 bytes of md5(hostname)
b[4], b[5], b[6] = machineID[0], machineID[1], machineID[2]

// Pid, 2 bytes, specs don't specify endianness, but we use big endian.
b[7], b[8] = byte(processID>>8), byte(processID)

// Increment, 3 bytes, big endian
i := atomic.AddUint32(&guidCounter, 1)
b[9], b[10], b[11] = byte(i>>16), byte(i>>8), byte(i)

return hex.EncodeToString(b[:])
}
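To make the byte layout concrete, here is a hypothetical test-style sketch (editorial, not part of the commit). It would have to live in a _test.go file inside the resty package, since newGUID is unexported; it reuses the package's assertEqual helper and needs encoding/binary, encoding/hex, testing, and time imported:

// TestNewGUIDLayout is a hypothetical example, not part of this commit.
func TestNewGUIDLayout(t *testing.T) {
	id1 := newGUID()
	id2 := newGUID()

	// 12 bytes, hex-encoded => 24 characters.
	assertEqual(t, 24, len(id1))

	raw, err := hex.DecodeString(id1)
	if err != nil {
		t.Fatalf("GUID is not valid hex: %v", err)
	}

	// Bytes 0-3 hold the creation time as big-endian seconds since the epoch.
	created := time.Unix(int64(binary.BigEndian.Uint32(raw[:4])), 0)
	assertEqual(t, true, time.Since(created) < time.Minute)

	// Bytes 4-6 (machine id) and 7-8 (pid) are stable within one process, so
	// consecutive ids share hex chars 8-17 and differ only in the counter.
	assertEqual(t, id1[8:18], id2[8:18])
	assertEqual(t, true, id1 != id2)
}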

var ioReadFull = io.ReadFull

// readRandomUint32 returns a random guidCounter.
func readRandomUint32() uint32 {
var b [4]byte
if _, err := ioReadFull(rand.Reader, b[:]); err == nil {
return (uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
}

// readRandomUint32 is only called at package init time (to initialize the
// unexported variable 'guidCounter'), so this panic can only happen at
// program startup, never later at runtime.
panic(errors.New("resty - guid: unable to generate random object id"))
}

var osHostname = os.Hostname

// readMachineID generates and returns a machine id derived from the hostname,
// falling back to random bytes; if both fail, it panics at startup.
func readMachineID() []byte {
var sum [3]byte
id := sum[:]

if hostname, err := osHostname(); err == nil {
hw := md5.New()
_, _ = hw.Write([]byte(hostname))
copy(id, hw.Sum(nil))
return id
}

if _, err := ioReadFull(rand.Reader, id); err == nil {
return id
}

// readMachineID is only called at package init time (to initialize the
// unexported variable 'machineID'), so this panic can only happen at
// program startup, never later at runtime.
panic(errors.New("resty - guid: unable to get hostname and random bytes"))
}
56 changes: 56 additions & 0 deletions util_test.go
@@ -8,6 +8,7 @@ package resty
import (
"bytes"
"errors"
"io"
"net/url"
"os"
"path/filepath"
@@ -124,6 +125,61 @@ func Test_createDirectory(t *testing.T) {
assertEqual(t, errMsg, err.Error())
}

func TestUtil_readRandomUint32(t *testing.T) {
defer func() {
if r := recover(); r == nil {
// panic: resty - guid: unable to generate random object id
t.Errorf("The code did not panic")
}
}()
errMsg := "read full error"
ioReadFull = func(_ io.Reader, _ []byte) (int, error) {
return 0, errors.New(errMsg)
}
t.Cleanup(func() {
ioReadFull = io.ReadFull
})

readRandomUint32()
}

func TestUtil_readMachineID(t *testing.T) {
t.Run("hostname error", func(t *testing.T) {
errHostMsg := "hostname error"
osHostname = func() (string, error) {
return "", errors.New(errHostMsg)
}
t.Cleanup(func() {
osHostname = os.Hostname
})

readMachineID()
})

t.Run("hostname and read full error", func(t *testing.T) {
defer func() {
if r := recover(); r == nil {
// panic: resty - guid: unable to get hostname and random bytes
t.Errorf("The code did not panic")
}
}()
errHostMsg := "hostname error"
osHostname = func() (string, error) {
return "", errors.New(errHostMsg)
}
errReadMsg := "read full error"
ioReadFull = func(_ io.Reader, _ []byte) (int, error) {
return 0, errors.New(errReadMsg)
}
t.Cleanup(func() {
osHostname = os.Hostname
ioReadFull = io.ReadFull
})

readMachineID()
})
}

// These test methods exist for test coverage purposes,
// to validate the getter and setter.
func TestUtilMiscTestCoverage(t *testing.T) {
