| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014 |
- package copying_test
- import (
- "bytes"
- "context"
- "crypto/rand"
- "fmt"
- "io"
- mathrand "math/rand"
- "net/url"
- "strings"
- "testing"
- "time"
- "github.com/aws/aws-sdk-go-v2/aws"
- "github.com/aws/aws-sdk-go-v2/config"
- "github.com/aws/aws-sdk-go-v2/credentials"
- "github.com/aws/aws-sdk-go-v2/service/s3"
- "github.com/aws/aws-sdk-go-v2/service/s3/types"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- )
// S3TestConfig holds configuration for S3 tests.
//
// NOTE(review): UseSSL and SkipVerifySSL are not read by any helper in this
// file — presumably consumed elsewhere (or matched against test_config.json);
// confirm before removing.
type S3TestConfig struct {
	Endpoint      string // base URL of the S3-compatible endpoint under test
	AccessKey     string // static access key ID used for request signing
	SecretKey     string // static secret key paired with AccessKey
	Region        string // signing region reported to the SDK
	BucketPrefix  string // prefix for generated bucket names; cleanup keys off this
	UseSSL        bool
	SkipVerifySSL bool
}
// Default test configuration - should match test_config.json
var defaultConfig = &S3TestConfig{
	Endpoint:      "http://127.0.0.1:8000", // Use explicit IPv4 address
	AccessKey:     "some_access_key1",
	SecretKey:     "some_secret_key1",
	Region:        "us-east-1",
	BucketPrefix:  "test-copying-", // every bucket this suite creates starts with this
	UseSSL:        false,
	SkipVerifySSL: true,
}
// Initialize math/rand with current time to ensure randomness.
//
// NOTE(review): mathrand.Seed is deprecated since Go 1.20, where the global
// source is auto-seeded; this init can be dropped once the module targets
// Go 1.20+ — confirm the go.mod version first.
func init() {
	mathrand.Seed(time.Now().UnixNano())
}
// getS3Client creates an AWS S3 client for testing.
//
// It signs with the static credentials from defaultConfig, pins every request
// to defaultConfig.Endpoint, and enables path-style addressing (bucket in the
// URL path, not the hostname), which SeaweedFS requires.
func getS3Client(t *testing.T) *s3.Client {
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRegion(defaultConfig.Region),
		config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
			defaultConfig.AccessKey,
			defaultConfig.SecretKey,
			"", // session token: not needed for static test credentials
		)),
		// NOTE(review): WithEndpointResolverWithOptions is deprecated in newer
		// aws-sdk-go-v2 releases in favor of s3.Options.BaseEndpoint — confirm
		// the pinned SDK version before migrating.
		config.WithEndpointResolverWithOptions(aws.EndpointResolverWithOptionsFunc(
			func(service, region string, options ...interface{}) (aws.Endpoint, error) {
				return aws.Endpoint{
					URL:               defaultConfig.Endpoint,
					SigningRegion:     defaultConfig.Region,
					HostnameImmutable: true, // keep the host exactly as configured
				}, nil
			})),
	)
	require.NoError(t, err)
	return s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.UsePathStyle = true // Important for SeaweedFS
	})
}
- // waitForS3Service waits for the S3 service to be ready
- func waitForS3Service(t *testing.T, client *s3.Client, timeout time.Duration) {
- start := time.Now()
- for time.Since(start) < timeout {
- _, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
- if err == nil {
- return
- }
- t.Logf("Waiting for S3 service to be ready... (error: %v)", err)
- time.Sleep(time.Second)
- }
- t.Fatalf("S3 service not ready after %v", timeout)
- }
- // getNewBucketName generates a unique bucket name
- func getNewBucketName() string {
- timestamp := time.Now().UnixNano()
- // Add random suffix to prevent collisions when tests run quickly
- randomSuffix := mathrand.Intn(100000)
- return fmt.Sprintf("%s%d-%d", defaultConfig.BucketPrefix, timestamp, randomSuffix)
- }
- // cleanupTestBuckets removes any leftover test buckets from previous runs
- func cleanupTestBuckets(t *testing.T, client *s3.Client) {
- resp, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
- if err != nil {
- t.Logf("Warning: failed to list buckets for cleanup: %v", err)
- return
- }
- for _, bucket := range resp.Buckets {
- bucketName := *bucket.Name
- // Only delete buckets that match our test prefix
- if strings.HasPrefix(bucketName, defaultConfig.BucketPrefix) {
- t.Logf("Cleaning up leftover test bucket: %s", bucketName)
- deleteBucket(t, client, bucketName)
- }
- }
- }
- // createBucket creates a new bucket for testing
- func createBucket(t *testing.T, client *s3.Client, bucketName string) {
- // First, try to delete the bucket if it exists (cleanup from previous failed tests)
- deleteBucket(t, client, bucketName)
- // Create the bucket
- _, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{
- Bucket: aws.String(bucketName),
- })
- require.NoError(t, err)
- }
- // deleteBucket deletes a bucket and all its contents
- func deleteBucket(t *testing.T, client *s3.Client, bucketName string) {
- // First, delete all objects
- deleteAllObjects(t, client, bucketName)
- // Then delete the bucket
- _, err := client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{
- Bucket: aws.String(bucketName),
- })
- if err != nil {
- // Only log warnings for actual errors, not "bucket doesn't exist"
- if !strings.Contains(err.Error(), "NoSuchBucket") {
- t.Logf("Warning: failed to delete bucket %s: %v", bucketName, err)
- }
- }
- }
- // deleteAllObjects deletes all objects in a bucket
- func deleteAllObjects(t *testing.T, client *s3.Client, bucketName string) {
- // List all objects
- paginator := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
- Bucket: aws.String(bucketName),
- })
- for paginator.HasMorePages() {
- page, err := paginator.NextPage(context.TODO())
- if err != nil {
- // Only log warnings for actual errors, not "bucket doesn't exist"
- if !strings.Contains(err.Error(), "NoSuchBucket") {
- t.Logf("Warning: failed to list objects in bucket %s: %v", bucketName, err)
- }
- return
- }
- if len(page.Contents) == 0 {
- break
- }
- var objectsToDelete []types.ObjectIdentifier
- for _, obj := range page.Contents {
- objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{
- Key: obj.Key,
- })
- }
- // Delete objects in batches
- if len(objectsToDelete) > 0 {
- _, err := client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{
- Bucket: aws.String(bucketName),
- Delete: &types.Delete{
- Objects: objectsToDelete,
- Quiet: aws.Bool(true),
- },
- })
- if err != nil {
- t.Logf("Warning: failed to delete objects in bucket %s: %v", bucketName, err)
- }
- }
- }
- }
- // putObject puts an object into a bucket
- func putObject(t *testing.T, client *s3.Client, bucketName, key, content string) *s3.PutObjectOutput {
- resp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{
- Bucket: aws.String(bucketName),
- Key: aws.String(key),
- Body: strings.NewReader(content),
- })
- require.NoError(t, err)
- return resp
- }
- // putObjectWithMetadata puts an object with metadata into a bucket
- func putObjectWithMetadata(t *testing.T, client *s3.Client, bucketName, key, content string, metadata map[string]string, contentType string) *s3.PutObjectOutput {
- input := &s3.PutObjectInput{
- Bucket: aws.String(bucketName),
- Key: aws.String(key),
- Body: strings.NewReader(content),
- }
- if metadata != nil {
- input.Metadata = metadata
- }
- if contentType != "" {
- input.ContentType = aws.String(contentType)
- }
- resp, err := client.PutObject(context.TODO(), input)
- require.NoError(t, err)
- return resp
- }
- // getObject gets an object from a bucket
- func getObject(t *testing.T, client *s3.Client, bucketName, key string) *s3.GetObjectOutput {
- resp, err := client.GetObject(context.TODO(), &s3.GetObjectInput{
- Bucket: aws.String(bucketName),
- Key: aws.String(key),
- })
- require.NoError(t, err)
- return resp
- }
- // getObjectBody gets the body content of an object
- func getObjectBody(t *testing.T, resp *s3.GetObjectOutput) string {
- body, err := io.ReadAll(resp.Body)
- require.NoError(t, err)
- resp.Body.Close()
- return string(body)
- }
// generateRandomData returns size bytes of cryptographically random data.
// It panics on failure, which for crypto/rand effectively never happens.
func generateRandomData(size int) []byte {
	buf := make([]byte, size)
	if _, err := rand.Read(buf); err != nil {
		panic(err)
	}
	return buf
}
// createCopySource builds the "bucket/key" string for the CopySource request
// field.
//
// Each path segment of the key is URL-escaped individually so special
// characters (spaces, '?', '&', ...) survive transport, while '/' separators
// are preserved — url.PathEscape on the whole key would encode '/' as %2F and
// break multi-segment keys such as "dir/file.txt".
func createCopySource(bucketName, key string) string {
	segments := strings.Split(key, "/")
	for i, seg := range segments {
		segments[i] = url.PathEscape(seg)
	}
	return fmt.Sprintf("%s/%s", bucketName, strings.Join(segments, "/"))
}
// TestBasicPutGet tests basic S3 put and get operations: text, empty, binary,
// and metadata-bearing objects must all round-trip with matching body and ETag.
func TestBasicPutGet(t *testing.T) {
	client := getS3Client(t)
	bucketName := getNewBucketName()
	// Create bucket
	createBucket(t, client, bucketName)
	defer deleteBucket(t, client, bucketName)
	// Test 1: Put and get a simple text object
	t.Run("Simple text object", func(t *testing.T) {
		key := "test-simple.txt"
		content := "Hello, SeaweedFS S3!"
		// Put object
		putResp := putObject(t, client, bucketName, key, content)
		assert.NotNil(t, putResp.ETag)
		// Get object; body and ETag must match what was put.
		getResp := getObject(t, client, bucketName, key)
		body := getObjectBody(t, getResp)
		assert.Equal(t, content, body)
		assert.Equal(t, putResp.ETag, getResp.ETag)
	})
	// Test 2: Put and get an empty object
	t.Run("Empty object", func(t *testing.T) {
		key := "test-empty.txt"
		content := ""
		putResp := putObject(t, client, bucketName, key, content)
		assert.NotNil(t, putResp.ETag)
		getResp := getObject(t, client, bucketName, key)
		body := getObjectBody(t, getResp)
		assert.Equal(t, content, body)
		assert.Equal(t, putResp.ETag, getResp.ETag)
	})
	// Test 3: Put and get a binary object
	t.Run("Binary object", func(t *testing.T) {
		key := "test-binary.bin"
		content := string(generateRandomData(1024)) // 1KB of random data
		putResp := putObject(t, client, bucketName, key, content)
		assert.NotNil(t, putResp.ETag)
		getResp := getObject(t, client, bucketName, key)
		body := getObjectBody(t, getResp)
		assert.Equal(t, content, body)
		assert.Equal(t, putResp.ETag, getResp.ETag)
	})
	// Test 4: Put and get object with metadata
	t.Run("Object with metadata", func(t *testing.T) {
		key := "test-metadata.txt"
		content := "Content with metadata"
		metadata := map[string]string{
			"author":      "test",
			"description": "test object with metadata",
		}
		contentType := "text/plain"
		putResp := putObjectWithMetadata(t, client, bucketName, key, content, metadata, contentType)
		assert.NotNil(t, putResp.ETag)
		getResp := getObject(t, client, bucketName, key)
		body := getObjectBody(t, getResp)
		assert.Equal(t, content, body)
		assert.Equal(t, putResp.ETag, getResp.ETag)
		// Content type and user metadata must survive the round trip.
		assert.Equal(t, contentType, *getResp.ContentType)
		assert.Equal(t, metadata["author"], getResp.Metadata["author"])
		assert.Equal(t, metadata["description"], getResp.Metadata["description"])
	})
}
// TestBasicBucketOperations tests basic bucket operations.
//
// The three subtests share bucketName and are order-dependent: create the
// bucket, list objects put into it, then delete it and confirm it is gone.
func TestBasicBucketOperations(t *testing.T) {
	client := getS3Client(t)
	bucketName := getNewBucketName()
	// Test 1: Create bucket
	t.Run("Create bucket", func(t *testing.T) {
		createBucket(t, client, bucketName)
		// Verify bucket exists by listing buckets
		resp, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
		require.NoError(t, err)
		found := false
		for _, bucket := range resp.Buckets {
			if *bucket.Name == bucketName {
				found = true
				break
			}
		}
		assert.True(t, found, "Bucket should exist after creation")
	})
	// Test 2: Put objects and list them
	t.Run("List objects", func(t *testing.T) {
		// Put multiple objects, including one under a "directory" prefix.
		objects := []string{"test1.txt", "test2.txt", "dir/test3.txt"}
		for _, key := range objects {
			putObject(t, client, bucketName, key, fmt.Sprintf("content of %s", key))
		}
		// List objects
		resp, err := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
			Bucket: aws.String(bucketName),
		})
		require.NoError(t, err)
		assert.Equal(t, len(objects), len(resp.Contents))
		// Verify each listed object is one we put (set equality given the
		// matching counts above).
		for _, obj := range resp.Contents {
			found := false
			for _, expected := range objects {
				if *obj.Key == expected {
					found = true
					break
				}
			}
			assert.True(t, found, "Object %s should be in list", *obj.Key)
		}
	})
	// Test 3: Delete bucket (cleanup)
	t.Run("Delete bucket", func(t *testing.T) {
		deleteBucket(t, client, bucketName)
		// Verify bucket is deleted by trying to list its contents
		_, err := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
			Bucket: aws.String(bucketName),
		})
		assert.Error(t, err, "Bucket should not exist after deletion")
	})
}
- // TestBasicLargeObject tests handling of larger objects (up to volume limit)
- func TestBasicLargeObject(t *testing.T) {
- client := getS3Client(t)
- bucketName := getNewBucketName()
- createBucket(t, client, bucketName)
- defer deleteBucket(t, client, bucketName)
- // Test with progressively larger objects
- sizes := []int{
- 1024, // 1KB
- 1024 * 10, // 10KB
- 1024 * 100, // 100KB
- 1024 * 1024, // 1MB
- 1024 * 1024 * 5, // 5MB
- 1024 * 1024 * 10, // 10MB
- }
- for _, size := range sizes {
- t.Run(fmt.Sprintf("Size_%dMB", size/(1024*1024)), func(t *testing.T) {
- key := fmt.Sprintf("large-object-%d.bin", size)
- content := string(generateRandomData(size))
- putResp := putObject(t, client, bucketName, key, content)
- assert.NotNil(t, putResp.ETag)
- getResp := getObject(t, client, bucketName, key)
- body := getObjectBody(t, getResp)
- assert.Equal(t, len(content), len(body))
- assert.Equal(t, content, body)
- assert.Equal(t, putResp.ETag, getResp.ETag)
- })
- }
- }
- // TestObjectCopySameBucket tests copying an object within the same bucket
- func TestObjectCopySameBucket(t *testing.T) {
- client := getS3Client(t)
- // Wait for S3 service to be ready
- waitForS3Service(t, client, 30*time.Second)
- bucketName := getNewBucketName()
- // Create bucket
- createBucket(t, client, bucketName)
- defer deleteBucket(t, client, bucketName)
- // Put source object
- sourceKey := "foo123bar"
- sourceContent := "foo"
- putObject(t, client, bucketName, sourceKey, sourceContent)
- // Copy object within the same bucket
- destKey := "bar321foo"
- copySource := createCopySource(bucketName, sourceKey)
- _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
- Bucket: aws.String(bucketName),
- Key: aws.String(destKey),
- CopySource: aws.String(copySource),
- })
- require.NoError(t, err, "Failed to copy object within same bucket")
- // Verify the copied object
- resp := getObject(t, client, bucketName, destKey)
- body := getObjectBody(t, resp)
- assert.Equal(t, sourceContent, body)
- }
- // TestObjectCopyDiffBucket tests copying an object to a different bucket
- func TestObjectCopyDiffBucket(t *testing.T) {
- client := getS3Client(t)
- sourceBucketName := getNewBucketName()
- destBucketName := getNewBucketName()
- // Create buckets
- createBucket(t, client, sourceBucketName)
- defer deleteBucket(t, client, sourceBucketName)
- createBucket(t, client, destBucketName)
- defer deleteBucket(t, client, destBucketName)
- // Put source object
- sourceKey := "foo123bar"
- sourceContent := "foo"
- putObject(t, client, sourceBucketName, sourceKey, sourceContent)
- // Copy object to different bucket
- destKey := "bar321foo"
- copySource := createCopySource(sourceBucketName, sourceKey)
- _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
- Bucket: aws.String(destBucketName),
- Key: aws.String(destKey),
- CopySource: aws.String(copySource),
- })
- require.NoError(t, err)
- // Verify the copied object
- resp := getObject(t, client, destBucketName, destKey)
- body := getObjectBody(t, resp)
- assert.Equal(t, sourceContent, body)
- }
// TestObjectCopyCannedAcl tests copying with ACL settings.
//
// First copy applies a public-read canned ACL; a second copy (of the first
// copy) additionally replaces the metadata via MetadataDirective REPLACE.
func TestObjectCopyCannedAcl(t *testing.T) {
	client := getS3Client(t)
	bucketName := getNewBucketName()
	// Create bucket
	createBucket(t, client, bucketName)
	defer deleteBucket(t, client, bucketName)
	// Put source object
	sourceKey := "foo123bar"
	sourceContent := "foo"
	putObject(t, client, bucketName, sourceKey, sourceContent)
	// Copy object with public-read ACL
	destKey := "bar321foo"
	copySource := createCopySource(bucketName, sourceKey)
	_, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
		Bucket:     aws.String(bucketName),
		Key:        aws.String(destKey),
		CopySource: aws.String(copySource),
		ACL:        types.ObjectCannedACLPublicRead,
	})
	require.NoError(t, err)
	// Verify the copied object
	resp := getObject(t, client, bucketName, destKey)
	body := getObjectBody(t, resp)
	assert.Equal(t, sourceContent, body)
	// Test metadata replacement with ACL: copy the first copy while replacing
	// its metadata wholesale.
	metadata := map[string]string{"abc": "def"}
	destKey2 := "foo123bar2"
	copySource2 := createCopySource(bucketName, destKey)
	_, err = client.CopyObject(context.TODO(), &s3.CopyObjectInput{
		Bucket:            aws.String(bucketName),
		Key:               aws.String(destKey2),
		CopySource:        aws.String(copySource2),
		ACL:               types.ObjectCannedACLPublicRead,
		Metadata:          metadata,
		MetadataDirective: types.MetadataDirectiveReplace,
	})
	require.NoError(t, err)
	// Verify the copied object with metadata: body unchanged, metadata replaced.
	resp2 := getObject(t, client, bucketName, destKey2)
	body2 := getObjectBody(t, resp2)
	assert.Equal(t, sourceContent, body2)
	assert.Equal(t, metadata, resp2.Metadata)
}
// TestObjectCopyRetainingMetadata tests copying while retaining metadata.
//
// No MetadataDirective is set, so the S3 default (COPY) applies: content type
// and user metadata must carry over to the destination unchanged.
func TestObjectCopyRetainingMetadata(t *testing.T) {
	client := getS3Client(t)
	bucketName := getNewBucketName()
	// Create bucket
	createBucket(t, client, bucketName)
	defer deleteBucket(t, client, bucketName)
	// Test with different sizes
	sizes := []int{3, 1024 * 1024} // 3 bytes and 1MB
	for _, size := range sizes {
		t.Run(fmt.Sprintf("size_%d", size), func(t *testing.T) {
			sourceKey := fmt.Sprintf("foo123bar_%d", size)
			sourceContent := string(generateRandomData(size))
			contentType := "audio/ogg"
			metadata := map[string]string{"key1": "value1", "key2": "value2"}
			// Put source object with metadata
			putObjectWithMetadata(t, client, bucketName, sourceKey, sourceContent, metadata, contentType)
			// Copy object (should retain metadata)
			destKey := fmt.Sprintf("bar321foo_%d", size)
			copySource := createCopySource(bucketName, sourceKey)
			_, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
				Bucket:     aws.String(bucketName),
				Key:        aws.String(destKey),
				CopySource: aws.String(copySource),
			})
			require.NoError(t, err)
			// Verify the copied object: body, content type, metadata, and length.
			resp := getObject(t, client, bucketName, destKey)
			body := getObjectBody(t, resp)
			assert.Equal(t, sourceContent, body)
			assert.Equal(t, contentType, *resp.ContentType)
			assert.Equal(t, metadata, resp.Metadata)
			require.NotNil(t, resp.ContentLength)
			assert.Equal(t, int64(size), *resp.ContentLength)
		})
	}
}
// TestMultipartCopySmall tests multipart copying of small files.
//
// A 1-byte object is copied via UploadPartCopy with an explicit "bytes=0-0"
// range, then assembled with CompleteMultipartUpload.
func TestMultipartCopySmall(t *testing.T) {
	client := getS3Client(t)
	// Clean up any leftover buckets from previous test runs
	cleanupTestBuckets(t, client)
	sourceBucketName := getNewBucketName()
	destBucketName := getNewBucketName()
	// Create buckets
	createBucket(t, client, sourceBucketName)
	defer deleteBucket(t, client, sourceBucketName)
	createBucket(t, client, destBucketName)
	defer deleteBucket(t, client, destBucketName)
	// Put source object
	sourceKey := "foo"
	sourceContent := "x" // 1 byte
	putObject(t, client, sourceBucketName, sourceKey, sourceContent)
	// Create multipart upload
	destKey := "mymultipart"
	createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{
		Bucket: aws.String(destBucketName),
		Key:    aws.String(destKey),
	})
	require.NoError(t, err)
	uploadID := *createResp.UploadId
	// Upload part copy covering the single byte of the source.
	copySource := createCopySource(sourceBucketName, sourceKey)
	copyResp, err := client.UploadPartCopy(context.TODO(), &s3.UploadPartCopyInput{
		Bucket:          aws.String(destBucketName),
		Key:             aws.String(destKey),
		UploadId:        aws.String(uploadID),
		PartNumber:      aws.Int32(1),
		CopySource:      aws.String(copySource),
		CopySourceRange: aws.String("bytes=0-0"),
	})
	require.NoError(t, err)
	// Complete multipart upload using the part ETag from the copy response.
	_, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{
		Bucket:   aws.String(destBucketName),
		Key:      aws.String(destKey),
		UploadId: aws.String(uploadID),
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: []types.CompletedPart{
				{
					ETag:       copyResp.CopyPartResult.ETag,
					PartNumber: aws.Int32(1),
				},
			},
		},
	})
	require.NoError(t, err)
	// Verify the copied object
	resp := getObject(t, client, destBucketName, destKey)
	body := getObjectBody(t, resp)
	assert.Equal(t, sourceContent, body)
	require.NotNil(t, resp.ContentLength)
	assert.Equal(t, int64(1), *resp.ContentLength)
}
// TestMultipartCopyWithoutRange tests multipart copying without range
// specification: omitting CopySourceRange must copy the entire 10-byte source.
func TestMultipartCopyWithoutRange(t *testing.T) {
	client := getS3Client(t)
	// Clean up any leftover buckets from previous test runs
	cleanupTestBuckets(t, client)
	sourceBucketName := getNewBucketName()
	destBucketName := getNewBucketName()
	// Create buckets
	createBucket(t, client, sourceBucketName)
	defer deleteBucket(t, client, sourceBucketName)
	createBucket(t, client, destBucketName)
	defer deleteBucket(t, client, destBucketName)
	// Put source object
	sourceKey := "source"
	sourceContent := string(generateRandomData(10))
	putObject(t, client, sourceBucketName, sourceKey, sourceContent)
	// Create multipart upload
	destKey := "mymultipartcopy"
	createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{
		Bucket: aws.String(destBucketName),
		Key:    aws.String(destKey),
	})
	require.NoError(t, err)
	uploadID := *createResp.UploadId
	// Upload part copy without range (should copy entire object)
	copySource := createCopySource(sourceBucketName, sourceKey)
	copyResp, err := client.UploadPartCopy(context.TODO(), &s3.UploadPartCopyInput{
		Bucket:     aws.String(destBucketName),
		Key:        aws.String(destKey),
		UploadId:   aws.String(uploadID),
		PartNumber: aws.Int32(1),
		CopySource: aws.String(copySource),
	})
	require.NoError(t, err)
	// Complete multipart upload with the single copied part.
	_, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{
		Bucket:   aws.String(destBucketName),
		Key:      aws.String(destKey),
		UploadId: aws.String(uploadID),
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: []types.CompletedPart{
				{
					ETag:       copyResp.CopyPartResult.ETag,
					PartNumber: aws.Int32(1),
				},
			},
		},
	})
	require.NoError(t, err)
	// Verify the copied object has the full source content and length.
	resp := getObject(t, client, destBucketName, destKey)
	body := getObjectBody(t, resp)
	assert.Equal(t, sourceContent, body)
	require.NotNil(t, resp.ContentLength)
	assert.Equal(t, int64(10), *resp.ContentLength)
}
// TestMultipartCopySpecialNames tests multipart copying with special character
// names. Source keys include a space, underscores, and a literal "?versionId"
// to exercise the URL escaping performed by createCopySource.
func TestMultipartCopySpecialNames(t *testing.T) {
	client := getS3Client(t)
	// Clean up any leftover buckets from previous test runs
	cleanupTestBuckets(t, client)
	sourceBucketName := getNewBucketName()
	destBucketName := getNewBucketName()
	// Create buckets
	createBucket(t, client, sourceBucketName)
	defer deleteBucket(t, client, sourceBucketName)
	createBucket(t, client, destBucketName)
	defer deleteBucket(t, client, destBucketName)
	// Test with special key names
	specialKeys := []string{" ", "_", "__", "?versionId"}
	sourceContent := "x" // 1 byte
	// destKey is reused across subtests; each run overwrites the previous copy.
	destKey := "mymultipart"
	for i, sourceKey := range specialKeys {
		t.Run(fmt.Sprintf("special_key_%d", i), func(t *testing.T) {
			// Put source object
			putObject(t, client, sourceBucketName, sourceKey, sourceContent)
			// Create multipart upload
			createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{
				Bucket: aws.String(destBucketName),
				Key:    aws.String(destKey),
			})
			require.NoError(t, err)
			uploadID := *createResp.UploadId
			// Upload part copy of the single source byte.
			copySource := createCopySource(sourceBucketName, sourceKey)
			copyResp, err := client.UploadPartCopy(context.TODO(), &s3.UploadPartCopyInput{
				Bucket:          aws.String(destBucketName),
				Key:             aws.String(destKey),
				UploadId:        aws.String(uploadID),
				PartNumber:      aws.Int32(1),
				CopySource:      aws.String(copySource),
				CopySourceRange: aws.String("bytes=0-0"),
			})
			require.NoError(t, err)
			// Complete multipart upload
			_, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{
				Bucket:   aws.String(destBucketName),
				Key:      aws.String(destKey),
				UploadId: aws.String(uploadID),
				MultipartUpload: &types.CompletedMultipartUpload{
					Parts: []types.CompletedPart{
						{
							ETag:       copyResp.CopyPartResult.ETag,
							PartNumber: aws.Int32(1),
						},
					},
				},
			})
			require.NoError(t, err)
			// Verify the copied object
			resp := getObject(t, client, destBucketName, destKey)
			body := getObjectBody(t, resp)
			assert.Equal(t, sourceContent, body)
			require.NotNil(t, resp.ContentLength)
			assert.Equal(t, int64(1), *resp.ContentLength)
		})
	}
}
// TestMultipartCopyMultipleSizes tests multipart copying with various file
// sizes. Prefixes of a 12MB source are copied in 5MB parts; the chosen sizes
// straddle the part boundary so both exact and ragged final parts are hit.
func TestMultipartCopyMultipleSizes(t *testing.T) {
	client := getS3Client(t)
	// Clean up any leftover buckets from previous test runs
	cleanupTestBuckets(t, client)
	sourceBucketName := getNewBucketName()
	destBucketName := getNewBucketName()
	// Create buckets
	createBucket(t, client, sourceBucketName)
	defer deleteBucket(t, client, sourceBucketName)
	createBucket(t, client, destBucketName)
	defer deleteBucket(t, client, destBucketName)
	// Put source object (12MB)
	sourceKey := "foo"
	sourceSize := 12 * 1024 * 1024
	sourceContent := generateRandomData(sourceSize)
	_, err := client.PutObject(context.TODO(), &s3.PutObjectInput{
		Bucket: aws.String(sourceBucketName),
		Key:    aws.String(sourceKey),
		Body:   bytes.NewReader(sourceContent),
	})
	require.NoError(t, err)
	destKey := "mymultipart"
	partSize := 5 * 1024 * 1024 // 5MB parts
	// Test different copy sizes
	testSizes := []int{
		5 * 1024 * 1024,         // 5MB
		5*1024*1024 + 100*1024,  // 5MB + 100KB
		5*1024*1024 + 600*1024,  // 5MB + 600KB
		10*1024*1024 + 100*1024, // 10MB + 100KB
		10*1024*1024 + 600*1024, // 10MB + 600KB
		10 * 1024 * 1024,        // 10MB
	}
	for _, size := range testSizes {
		t.Run(fmt.Sprintf("size_%d", size), func(t *testing.T) {
			// Create multipart upload
			createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{
				Bucket: aws.String(destBucketName),
				Key:    aws.String(destKey),
			})
			require.NoError(t, err)
			uploadID := *createResp.UploadId
			// Upload parts: each iteration copies source bytes
			// [i, min(i+partSize, size)-1] as the next part.
			var parts []types.CompletedPart
			copySource := createCopySource(sourceBucketName, sourceKey)
			for i := 0; i < size; i += partSize {
				partNum := int32(len(parts) + 1)
				endOffset := i + partSize - 1
				if endOffset >= size {
					endOffset = size - 1 // final part may be shorter than partSize
				}
				copyRange := fmt.Sprintf("bytes=%d-%d", i, endOffset)
				copyResp, err := client.UploadPartCopy(context.TODO(), &s3.UploadPartCopyInput{
					Bucket:          aws.String(destBucketName),
					Key:             aws.String(destKey),
					UploadId:        aws.String(uploadID),
					PartNumber:      aws.Int32(partNum),
					CopySource:      aws.String(copySource),
					CopySourceRange: aws.String(copyRange),
				})
				require.NoError(t, err)
				parts = append(parts, types.CompletedPart{
					ETag:       copyResp.CopyPartResult.ETag,
					PartNumber: aws.Int32(partNum),
				})
			}
			// Complete multipart upload
			_, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{
				Bucket:   aws.String(destBucketName),
				Key:      aws.String(destKey),
				UploadId: aws.String(uploadID),
				MultipartUpload: &types.CompletedMultipartUpload{
					Parts: parts,
				},
			})
			require.NoError(t, err)
			// Verify the assembled object matches the source prefix byte-for-byte.
			resp := getObject(t, client, destBucketName, destKey)
			body, err := io.ReadAll(resp.Body)
			require.NoError(t, err)
			resp.Body.Close()
			require.NotNil(t, resp.ContentLength)
			assert.Equal(t, int64(size), *resp.ContentLength)
			assert.Equal(t, sourceContent[:size], body)
		})
	}
}
- // TestCopyObjectIfMatchGood tests copying with matching ETag condition
- func TestCopyObjectIfMatchGood(t *testing.T) {
- client := getS3Client(t)
- bucketName := getNewBucketName()
- // Create bucket
- createBucket(t, client, bucketName)
- defer deleteBucket(t, client, bucketName)
- // Put source object
- sourceKey := "foo"
- sourceContent := "bar"
- putResp := putObject(t, client, bucketName, sourceKey, sourceContent)
- // Copy object with matching ETag
- destKey := "bar"
- copySource := createCopySource(bucketName, sourceKey)
- _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
- Bucket: aws.String(bucketName),
- Key: aws.String(destKey),
- CopySource: aws.String(copySource),
- CopySourceIfMatch: putResp.ETag,
- })
- require.NoError(t, err)
- // Verify the copied object
- resp := getObject(t, client, bucketName, destKey)
- body := getObjectBody(t, resp)
- assert.Equal(t, sourceContent, body)
- }
- // TestCopyObjectIfNoneMatchFailed tests copying with non-matching ETag condition
- func TestCopyObjectIfNoneMatchFailed(t *testing.T) {
- client := getS3Client(t)
- bucketName := getNewBucketName()
- // Create bucket
- createBucket(t, client, bucketName)
- defer deleteBucket(t, client, bucketName)
- // Put source object
- sourceKey := "foo"
- sourceContent := "bar"
- putObject(t, client, bucketName, sourceKey, sourceContent)
- // Copy object with non-matching ETag (should succeed)
- destKey := "bar"
- copySource := createCopySource(bucketName, sourceKey)
- _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
- Bucket: aws.String(bucketName),
- Key: aws.String(destKey),
- CopySource: aws.String(copySource),
- CopySourceIfNoneMatch: aws.String("ABCORZ"),
- })
- require.NoError(t, err)
- // Verify the copied object
- resp := getObject(t, client, bucketName, destKey)
- body := getObjectBody(t, resp)
- assert.Equal(t, sourceContent, body)
- }
- // TestCopyObjectIfMatchFailed tests copying with non-matching ETag condition (should fail)
- func TestCopyObjectIfMatchFailed(t *testing.T) {
- client := getS3Client(t)
- bucketName := getNewBucketName()
- // Create bucket
- createBucket(t, client, bucketName)
- defer deleteBucket(t, client, bucketName)
- // Put source object
- sourceKey := "foo"
- sourceContent := "bar"
- putObject(t, client, bucketName, sourceKey, sourceContent)
- // Copy object with non-matching ETag (should fail)
- destKey := "bar"
- copySource := createCopySource(bucketName, sourceKey)
- _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
- Bucket: aws.String(bucketName),
- Key: aws.String(destKey),
- CopySource: aws.String(copySource),
- CopySourceIfMatch: aws.String("ABCORZ"),
- })
- // Should fail with precondition failed
- require.Error(t, err)
- // Note: We could check for specific error types, but SeaweedFS might return different error codes
- }
- // TestCopyObjectIfNoneMatchGood tests copying with matching ETag condition (should fail)
- func TestCopyObjectIfNoneMatchGood(t *testing.T) {
- client := getS3Client(t)
- bucketName := getNewBucketName()
- // Create bucket
- createBucket(t, client, bucketName)
- defer deleteBucket(t, client, bucketName)
- // Put source object
- sourceKey := "foo"
- sourceContent := "bar"
- putResp := putObject(t, client, bucketName, sourceKey, sourceContent)
- // Copy object with matching ETag for IfNoneMatch (should fail)
- destKey := "bar"
- copySource := createCopySource(bucketName, sourceKey)
- _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
- Bucket: aws.String(bucketName),
- Key: aws.String(destKey),
- CopySource: aws.String(copySource),
- CopySourceIfNoneMatch: putResp.ETag,
- })
- // Should fail with precondition failed
- require.Error(t, err)
- }
|