Add tests, docs, minor fixes (wal-g#1337)
usernamedt authored Sep 16, 2022
1 parent 3e1f315 commit 7c2fd58
Showing 9 changed files with 512 additions and 6 deletions.
22 changes: 22 additions & 0 deletions docs/Greenplum.md
@@ -62,6 +62,28 @@ After the successful configuration, use the `backup-push` command from the coordinator host
wal-g backup-push --config=/path/to/config.yaml
```

#### Delta backups (work in progress)

* `WALG_DELTA_MAX_STEPS`

A delta backup stores only the difference between a previously taken backup and the present state of the cluster. `WALG_DELTA_MAX_STEPS` determines how many delta backups can be taken between full backups. Defaults to 0.
The restoration process automatically fetches all the necessary deltas together with the base backup and composes a valid restored backup (you still need the WALs written after the start of the last backup to restore a consistent cluster).

Delta computation is based on the file system ModTime and the LSNs of pages in data files for heap relations, and on the ModCount + EOF combination for AO/AOCS relations.
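
For AO/AOCS segment files, that ModCount + EOF comparison amounts to a three-way decision per segment file. The sketch below is illustrative only; the type and helper names are hypothetical and do not mirror WAL-G's actual internals:

```go
package main

import "fmt"

// aoSegState is a hypothetical summary of an AO/AOCS segment file's state
// as recorded for a backup.
type aoSegState struct {
	ModCount int64 // modification count tracked for the segment
	EOF      int64 // logical end-of-file of the segment
}

// aoDeltaAction is the illustrative per-file decision.
type aoDeltaAction string

const (
	skipFile        aoDeltaAction = "skip"        // unchanged: reference the copy already in storage
	incrementalCopy aoDeltaAction = "incremental" // appended to: upload only the tail past the base EOF
	fullCopy        aoDeltaAction = "full"        // anything else: upload the whole file
)

// decideAoDelta sketches the ModCount + EOF comparison described above.
func decideAoDelta(base, current aoSegState) aoDeltaAction {
	switch {
	case current.ModCount == base.ModCount && current.EOF == base.EOF:
		return skipFile
	case current.EOF > base.EOF:
		return incrementalCopy
	default:
		return fullCopy
	}
}

func main() {
	base := aoSegState{ModCount: 3, EOF: 1024}
	fmt.Println(decideAoDelta(base, base))                               // skip
	fmt.Println(decideAoDelta(base, aoSegState{ModCount: 4, EOF: 4096})) // incremental
	fmt.Println(decideAoDelta(base, aoSegState{ModCount: 4, EOF: 512}))  // full
}
```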

##### Create delta from specific backup
When creating a delta backup (`WALG_DELTA_MAX_STEPS` > 0), WAL-G uses the latest backup as the base by default. This behaviour can be changed via the following flags:

* `--delta-from-name` flag or `WALG_DELTA_FROM_NAME` environment variable to use the backup with the specified name as the base for the delta backup

* `--delta-from-user-data` flag or `WALG_DELTA_FROM_USER_DATA` environment variable to use the backup with the specified user data as the base for the delta backup

Examples:
```bash
wal-g backup-push --delta-from-name backup_name --config=/path/to/config.yaml
wal-g backup-push --delta-from-user-data "{ \"x\": [3], \"y\": 4 }" --config=/path/to/config.yaml
```

### ``backup-fetch``

When fetching base backups, the user should pass in the cluster restore configuration and the name of the backup.
5 changes: 4 additions & 1 deletion internal/databases/greenplum/ao_increment.go
@@ -133,7 +133,10 @@ func ApplyFileIncrement(fileName string, increment io.Reader, fsync bool) error
 	return nil
 }

-func newIncrementalPageReader(file io.ReadSeekCloser, eof, offset int64) (io.ReadCloser, error) {
+func NewIncrementalPageReader(file io.ReadSeekCloser, eof, offset int64) (io.ReadCloser, error) {
+	if eof <= offset {
+		return nil, fmt.Errorf("file eof %d is less than or equal to offset %d", eof, offset)
+	}
 	var headerBuffer bytes.Buffer
 	headerBuffer.Write(IncrementFileHeader)
 	headerBuffer.Write(utility.ToBytes(uint64(eof)))
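
The now-exported `NewIncrementalPageReader` pairs with `ApplyFileIncrement` from the same hunk. A rough usage sketch; the segment paths and EOF values below are invented, and error handling is reduced to panics for brevity:

```go
package main

import (
	"os"

	"github.com/wal-g/wal-g/internal/databases/greenplum"
)

func main() {
	const (
		baseFileEOF = int64(1024) // EOF of the segment recorded in the base backup (made up)
		currentEOF  = int64(4096) // EOF of the live segment file (made up)
	)

	// *os.File satisfies io.ReadSeekCloser, which the reader expects.
	file, err := os.Open("/data/seg0/base/16384/12345.1") // hypothetical AO segment path
	if err != nil {
		panic(err)
	}
	defer file.Close()

	// The reader emits the increment header (eof, then offset), followed by
	// the file bytes in the range [baseFileEOF, currentEOF).
	increment, err := greenplum.NewIncrementalPageReader(file, currentEOF, baseFileEOF)
	if err != nil {
		panic(err)
	}
	defer increment.Close()

	// Applying the increment to a restored copy of the base segment file
	// brings it up to currentEOF; the final argument requests an fsync.
	if err := greenplum.ApplyFileIncrement("/restore/seg0/base/16384/12345.1", increment, true); err != nil {
		panic(err)
	}
}
```
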
70 changes: 70 additions & 0 deletions internal/databases/greenplum/ao_increment_test.go
@@ -0,0 +1,70 @@
package greenplum_test

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/wal-g/wal-g/internal/databases/greenplum"
	"github.com/wal-g/wal-g/internal/walparser/parsingutil"
)

const aoSegmentFileName = "../../../test/testdata/gp_ao_file.bin"
const aoSegmentFileSizeBytes = 192

func TestReadIncrement(t *testing.T) {
	gpReadIncrement(10, 100, t)
}

func TestReadIncrementFull(t *testing.T) {
	gpReadIncrement(0, aoSegmentFileSizeBytes, t)
}

func TestFailOnIncorrectOffset(t *testing.T) {
	file, err := os.Open(aoSegmentFileName)
	if err != nil {
		fmt.Print(err.Error())
	}

	// Both eof == offset and eof < offset must be rejected.
	_, err = greenplum.NewIncrementalPageReader(file, aoSegmentFileSizeBytes, aoSegmentFileSizeBytes)
	assert.Error(t, err)

	_, err = greenplum.NewIncrementalPageReader(file, 0, aoSegmentFileSizeBytes)
	assert.Error(t, err)
}

func gpReadIncrement(offset, eof int64, t *testing.T) {
	file, err := os.Open(aoSegmentFileName)
	if err != nil {
		fmt.Print(err.Error())
	}

	reader, err := greenplum.NewIncrementalPageReader(file, eof, offset)
	assert.NoError(t, err)

	increment, err := io.ReadAll(reader)
	assert.NoError(t, err)

	// The increment starts with the header, followed by eof and offset.
	incrementBuf := bytes.NewBuffer(increment)
	err = greenplum.ReadIncrementFileHeader(incrementBuf)
	assert.NoError(t, err)

	var parsedEof uint64
	var parsedOffset uint64
	err = parsingutil.ParseMultipleFieldsFromReader([]parsingutil.FieldToParse{
		{Field: &parsedEof, Name: "eof"},
		{Field: &parsedOffset, Name: "offset"},
	}, incrementBuf)
	assert.NoError(t, err)

	assert.Equal(t, parsedOffset, uint64(offset))
	assert.Equal(t, parsedEof, uint64(eof))

	// The remainder of the increment must match the file fragment [offset, eof).
	_, _ = file.Seek(offset, io.SeekStart)

	fileFragment := new(bytes.Buffer)
	_, _ = io.CopyN(fileFragment, file, eof-offset)

	assert.True(t, bytes.Equal(fileFragment.Bytes(), incrementBuf.Bytes()))
}
@@ -104,7 +104,7 @@ func (u *AoStorageUploader) GetFiles() *AOFilesMetadataDTO {

 func (u *AoStorageUploader) skipAoUpload(cfi *internal.ComposeFileInfo, aoMeta AoRelFileMetadata, storageKey string) error {
 	u.addAoFileMetadata(cfi, storageKey, aoMeta, true, false)
-	u.bundleFiles.AddFile(cfi.Header, cfi.FileInfo, false)
+	u.bundleFiles.AddSkippedFile(cfi.Header, cfi.FileInfo)
 	tracelog.DebugLogger.Printf("Skipping %s AO relfile (already exists in storage as %s)", cfi.Path, storageKey)
 	return nil
 }
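
The change above records AO relfiles that are deduplicated against existing storage via `AddSkippedFile` instead of `AddFile`, presumably so the backup's file metadata can tell deduplicated segments apart from files actually written in this backup. A hypothetical stand-in for that registry, inferred only from the two call shapes visible in this hunk and not necessarily the real interface behind `u.bundleFiles`:

```go
package main

import (
	"archive/tar"
	"fmt"
	"os"
)

// fileRegistry is a hypothetical stand-in for the bundle-files tracker used in
// skipAoUpload: AddFile records a file that was actually written to the backup
// (optionally as an increment), while AddSkippedFile records one that was
// deduplicated against a copy already present in storage.
type fileRegistry interface {
	AddFile(header *tar.Header, info os.FileInfo, isIncremented bool)
	AddSkippedFile(header *tar.Header, info os.FileInfo)
}

// printingRegistry just logs what it is asked to record.
type printingRegistry struct{}

func (printingRegistry) AddFile(h *tar.Header, _ os.FileInfo, isIncremented bool) {
	fmt.Printf("stored %s (incremented=%v)\n", h.Name, isIncremented)
}

func (printingRegistry) AddSkippedFile(h *tar.Header, _ os.FileInfo) {
	fmt.Printf("skipped %s (already in storage)\n", h.Name)
}

func main() {
	var reg fileRegistry = printingRegistry{}
	reg.AddSkippedFile(&tar.Header{Name: "16384/12345.1"}, nil)
	reg.AddFile(&tar.Header{Name: "16384/12345.2"}, nil, true)
}
```
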
@@ -147,7 +147,7 @@ func (u *AoStorageUploader) incrementalAoUpload(
 		return err
 	}

-	incrementalReader, err := newIncrementalPageReader(file, aoMeta.eof, baseFileEOF)
+	incrementalReader, err := NewIncrementalPageReader(file, aoMeta.eof, baseFileEOF)
 	if err != nil {
 		return err
 	}

0 comments on commit 7c2fd58
