s3_copying_test.go 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014
  1. package copying_test
  2. import (
  3. "bytes"
  4. "context"
  5. "crypto/rand"
  6. "fmt"
  7. "io"
  8. mathrand "math/rand"
  9. "net/url"
  10. "strings"
  11. "testing"
  12. "time"
  13. "github.com/aws/aws-sdk-go-v2/aws"
  14. "github.com/aws/aws-sdk-go-v2/config"
  15. "github.com/aws/aws-sdk-go-v2/credentials"
  16. "github.com/aws/aws-sdk-go-v2/service/s3"
  17. "github.com/aws/aws-sdk-go-v2/service/s3/types"
  18. "github.com/stretchr/testify/assert"
  19. "github.com/stretchr/testify/require"
  20. )
// S3TestConfig holds configuration for S3 tests
type S3TestConfig struct {
	Endpoint      string // base URL of the S3-compatible endpoint under test
	AccessKey     string // static access key presented to the endpoint
	SecretKey     string // static secret key paired with AccessKey
	Region        string // region name used for request signing
	BucketPrefix  string // prefix for all generated bucket names; also used to match buckets during cleanup
	UseSSL        bool   // NOTE(review): not referenced in this file — presumably consumed elsewhere; confirm
	SkipVerifySSL bool   // NOTE(review): not referenced in this file — presumably consumed elsewhere; confirm
}
// Default test configuration - should match test_config.json
var defaultConfig = &S3TestConfig{
	Endpoint:      "http://127.0.0.1:8000", // Use explicit IPv4 address
	AccessKey:     "some_access_key1",
	SecretKey:     "some_secret_key1",
	Region:        "us-east-1",
	BucketPrefix:  "test-copying-", // every bucket created by these tests carries this prefix
	UseSSL:        false,
	SkipVerifySSL: true,
}
// Initialize math/rand with current time to ensure randomness
func init() {
	// Seed the global math/rand source so bucket-name suffixes differ
	// between runs. NOTE(review): rand.Seed is deprecated as of Go 1.20,
	// where the global source is auto-seeded — this call can be dropped
	// once the module targets Go 1.20+.
	mathrand.Seed(time.Now().UnixNano())
}
// getS3Client creates an AWS S3 client for testing, configured with static
// credentials and a fixed local endpoint from defaultConfig.
func getS3Client(t *testing.T) *s3.Client {
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRegion(defaultConfig.Region),
		config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
			defaultConfig.AccessKey,
			defaultConfig.SecretKey,
			"", // no session token for static test credentials
		)),
		// Route every request to the fixed test endpoint instead of AWS.
		// NOTE(review): WithEndpointResolverWithOptions is deprecated in
		// recent aws-sdk-go-v2 releases in favor of s3.Options.BaseEndpoint —
		// consider migrating when the SDK dependency is upgraded.
		config.WithEndpointResolverWithOptions(aws.EndpointResolverWithOptionsFunc(
			func(service, region string, options ...interface{}) (aws.Endpoint, error) {
				return aws.Endpoint{
					URL:               defaultConfig.Endpoint,
					SigningRegion:     defaultConfig.Region,
					HostnameImmutable: true, // keep the host as-is; no bucket-subdomain rewriting
				}, nil
			})),
	)
	require.NoError(t, err)
	return s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.UsePathStyle = true // Important for SeaweedFS
	})
}
  68. // waitForS3Service waits for the S3 service to be ready
  69. func waitForS3Service(t *testing.T, client *s3.Client, timeout time.Duration) {
  70. start := time.Now()
  71. for time.Since(start) < timeout {
  72. _, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
  73. if err == nil {
  74. return
  75. }
  76. t.Logf("Waiting for S3 service to be ready... (error: %v)", err)
  77. time.Sleep(time.Second)
  78. }
  79. t.Fatalf("S3 service not ready after %v", timeout)
  80. }
  81. // getNewBucketName generates a unique bucket name
  82. func getNewBucketName() string {
  83. timestamp := time.Now().UnixNano()
  84. // Add random suffix to prevent collisions when tests run quickly
  85. randomSuffix := mathrand.Intn(100000)
  86. return fmt.Sprintf("%s%d-%d", defaultConfig.BucketPrefix, timestamp, randomSuffix)
  87. }
  88. // cleanupTestBuckets removes any leftover test buckets from previous runs
  89. func cleanupTestBuckets(t *testing.T, client *s3.Client) {
  90. resp, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
  91. if err != nil {
  92. t.Logf("Warning: failed to list buckets for cleanup: %v", err)
  93. return
  94. }
  95. for _, bucket := range resp.Buckets {
  96. bucketName := *bucket.Name
  97. // Only delete buckets that match our test prefix
  98. if strings.HasPrefix(bucketName, defaultConfig.BucketPrefix) {
  99. t.Logf("Cleaning up leftover test bucket: %s", bucketName)
  100. deleteBucket(t, client, bucketName)
  101. }
  102. }
  103. }
  104. // createBucket creates a new bucket for testing
  105. func createBucket(t *testing.T, client *s3.Client, bucketName string) {
  106. // First, try to delete the bucket if it exists (cleanup from previous failed tests)
  107. deleteBucket(t, client, bucketName)
  108. // Create the bucket
  109. _, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{
  110. Bucket: aws.String(bucketName),
  111. })
  112. require.NoError(t, err)
  113. }
  114. // deleteBucket deletes a bucket and all its contents
  115. func deleteBucket(t *testing.T, client *s3.Client, bucketName string) {
  116. // First, delete all objects
  117. deleteAllObjects(t, client, bucketName)
  118. // Then delete the bucket
  119. _, err := client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{
  120. Bucket: aws.String(bucketName),
  121. })
  122. if err != nil {
  123. // Only log warnings for actual errors, not "bucket doesn't exist"
  124. if !strings.Contains(err.Error(), "NoSuchBucket") {
  125. t.Logf("Warning: failed to delete bucket %s: %v", bucketName, err)
  126. }
  127. }
  128. }
// deleteAllObjects deletes all objects in a bucket, paging through the
// listing and issuing a batch DeleteObjects per page. Errors are logged as
// warnings (this is best-effort cleanup), and "NoSuchBucket" is ignored.
func deleteAllObjects(t *testing.T, client *s3.Client, bucketName string) {
	// List all objects
	paginator := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
		Bucket: aws.String(bucketName),
	})
	for paginator.HasMorePages() {
		page, err := paginator.NextPage(context.TODO())
		if err != nil {
			// Only log warnings for actual errors, not "bucket doesn't exist"
			if !strings.Contains(err.Error(), "NoSuchBucket") {
				t.Logf("Warning: failed to list objects in bucket %s: %v", bucketName, err)
			}
			return
		}
		if len(page.Contents) == 0 {
			break
		}
		// Collect identifiers so the whole page can be removed in one call.
		var objectsToDelete []types.ObjectIdentifier
		for _, obj := range page.Contents {
			objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{
				Key: obj.Key,
			})
		}
		// Delete objects in batches
		if len(objectsToDelete) > 0 {
			_, err := client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{
				Bucket: aws.String(bucketName),
				Delete: &types.Delete{
					Objects: objectsToDelete,
					Quiet:   aws.Bool(true), // suppress per-object results in the response
				},
			})
			if err != nil {
				t.Logf("Warning: failed to delete objects in bucket %s: %v", bucketName, err)
			}
		}
	}
}
  168. // putObject puts an object into a bucket
  169. func putObject(t *testing.T, client *s3.Client, bucketName, key, content string) *s3.PutObjectOutput {
  170. resp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{
  171. Bucket: aws.String(bucketName),
  172. Key: aws.String(key),
  173. Body: strings.NewReader(content),
  174. })
  175. require.NoError(t, err)
  176. return resp
  177. }
  178. // putObjectWithMetadata puts an object with metadata into a bucket
  179. func putObjectWithMetadata(t *testing.T, client *s3.Client, bucketName, key, content string, metadata map[string]string, contentType string) *s3.PutObjectOutput {
  180. input := &s3.PutObjectInput{
  181. Bucket: aws.String(bucketName),
  182. Key: aws.String(key),
  183. Body: strings.NewReader(content),
  184. }
  185. if metadata != nil {
  186. input.Metadata = metadata
  187. }
  188. if contentType != "" {
  189. input.ContentType = aws.String(contentType)
  190. }
  191. resp, err := client.PutObject(context.TODO(), input)
  192. require.NoError(t, err)
  193. return resp
  194. }
  195. // getObject gets an object from a bucket
  196. func getObject(t *testing.T, client *s3.Client, bucketName, key string) *s3.GetObjectOutput {
  197. resp, err := client.GetObject(context.TODO(), &s3.GetObjectInput{
  198. Bucket: aws.String(bucketName),
  199. Key: aws.String(key),
  200. })
  201. require.NoError(t, err)
  202. return resp
  203. }
  204. // getObjectBody gets the body content of an object
  205. func getObjectBody(t *testing.T, resp *s3.GetObjectOutput) string {
  206. body, err := io.ReadAll(resp.Body)
  207. require.NoError(t, err)
  208. resp.Body.Close()
  209. return string(body)
  210. }
  211. // generateRandomData generates random data of specified size
  212. func generateRandomData(size int) []byte {
  213. data := make([]byte, size)
  214. _, err := rand.Read(data)
  215. if err != nil {
  216. panic(err)
  217. }
  218. return data
  219. }
  220. // createCopySource creates a properly URL-encoded copy source string
  221. func createCopySource(bucketName, key string) string {
  222. // URL encode the key to handle special characters like spaces
  223. encodedKey := url.PathEscape(key)
  224. return fmt.Sprintf("%s/%s", bucketName, encodedKey)
  225. }
// TestBasicPutGet tests basic S3 put and get operations: plain text, empty,
// binary, and metadata-carrying objects all round-trip with matching ETags.
func TestBasicPutGet(t *testing.T) {
	client := getS3Client(t)
	bucketName := getNewBucketName()

	// Create bucket
	createBucket(t, client, bucketName)
	defer deleteBucket(t, client, bucketName)

	// Test 1: Put and get a simple text object
	t.Run("Simple text object", func(t *testing.T) {
		key := "test-simple.txt"
		content := "Hello, SeaweedFS S3!"

		// Put object
		putResp := putObject(t, client, bucketName, key, content)
		assert.NotNil(t, putResp.ETag)

		// Get object and check both the payload and the ETag round-trip.
		getResp := getObject(t, client, bucketName, key)
		body := getObjectBody(t, getResp)
		assert.Equal(t, content, body)
		assert.Equal(t, putResp.ETag, getResp.ETag)
	})

	// Test 2: Put and get an empty object
	t.Run("Empty object", func(t *testing.T) {
		key := "test-empty.txt"
		content := ""

		putResp := putObject(t, client, bucketName, key, content)
		assert.NotNil(t, putResp.ETag)

		getResp := getObject(t, client, bucketName, key)
		body := getObjectBody(t, getResp)
		assert.Equal(t, content, body)
		assert.Equal(t, putResp.ETag, getResp.ETag)
	})

	// Test 3: Put and get a binary object
	t.Run("Binary object", func(t *testing.T) {
		key := "test-binary.bin"
		content := string(generateRandomData(1024)) // 1KB of random data

		putResp := putObject(t, client, bucketName, key, content)
		assert.NotNil(t, putResp.ETag)

		getResp := getObject(t, client, bucketName, key)
		body := getObjectBody(t, getResp)
		assert.Equal(t, content, body)
		assert.Equal(t, putResp.ETag, getResp.ETag)
	})

	// Test 4: Put and get object with metadata
	t.Run("Object with metadata", func(t *testing.T) {
		key := "test-metadata.txt"
		content := "Content with metadata"
		metadata := map[string]string{
			"author":      "test",
			"description": "test object with metadata",
		}
		contentType := "text/plain"

		putResp := putObjectWithMetadata(t, client, bucketName, key, content, metadata, contentType)
		assert.NotNil(t, putResp.ETag)

		// Content type and user metadata must survive the round-trip.
		getResp := getObject(t, client, bucketName, key)
		body := getObjectBody(t, getResp)
		assert.Equal(t, content, body)
		assert.Equal(t, putResp.ETag, getResp.ETag)
		assert.Equal(t, contentType, *getResp.ContentType)
		assert.Equal(t, metadata["author"], getResp.Metadata["author"])
		assert.Equal(t, metadata["description"], getResp.Metadata["description"])
	})
}
// TestBasicBucketOperations tests basic bucket operations: create, list
// objects, and delete. The subtests run in order and share one bucket.
func TestBasicBucketOperations(t *testing.T) {
	client := getS3Client(t)
	bucketName := getNewBucketName()

	// Test 1: Create bucket
	t.Run("Create bucket", func(t *testing.T) {
		createBucket(t, client, bucketName)

		// Verify bucket exists by listing buckets
		resp, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
		require.NoError(t, err)
		found := false
		for _, bucket := range resp.Buckets {
			if *bucket.Name == bucketName {
				found = true
				break
			}
		}
		assert.True(t, found, "Bucket should exist after creation")
	})

	// Test 2: Put objects and list them
	t.Run("List objects", func(t *testing.T) {
		// Put multiple objects, including one under a "directory" prefix
		objects := []string{"test1.txt", "test2.txt", "dir/test3.txt"}
		for _, key := range objects {
			putObject(t, client, bucketName, key, fmt.Sprintf("content of %s", key))
		}

		// List objects
		resp, err := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
			Bucket: aws.String(bucketName),
		})
		require.NoError(t, err)
		assert.Equal(t, len(objects), len(resp.Contents))

		// Verify each listed object is one of the keys we uploaded
		for _, obj := range resp.Contents {
			found := false
			for _, expected := range objects {
				if *obj.Key == expected {
					found = true
					break
				}
			}
			assert.True(t, found, "Object %s should be in list", *obj.Key)
		}
	})

	// Test 3: Delete bucket (cleanup); deleteBucket empties it first, so the
	// objects uploaded in the previous subtest do not block deletion.
	t.Run("Delete bucket", func(t *testing.T) {
		deleteBucket(t, client, bucketName)

		// Verify bucket is deleted by trying to list its contents
		_, err := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
			Bucket: aws.String(bucketName),
		})
		assert.Error(t, err, "Bucket should not exist after deletion")
	})
}
  342. // TestBasicLargeObject tests handling of larger objects (up to volume limit)
  343. func TestBasicLargeObject(t *testing.T) {
  344. client := getS3Client(t)
  345. bucketName := getNewBucketName()
  346. createBucket(t, client, bucketName)
  347. defer deleteBucket(t, client, bucketName)
  348. // Test with progressively larger objects
  349. sizes := []int{
  350. 1024, // 1KB
  351. 1024 * 10, // 10KB
  352. 1024 * 100, // 100KB
  353. 1024 * 1024, // 1MB
  354. 1024 * 1024 * 5, // 5MB
  355. 1024 * 1024 * 10, // 10MB
  356. }
  357. for _, size := range sizes {
  358. t.Run(fmt.Sprintf("Size_%dMB", size/(1024*1024)), func(t *testing.T) {
  359. key := fmt.Sprintf("large-object-%d.bin", size)
  360. content := string(generateRandomData(size))
  361. putResp := putObject(t, client, bucketName, key, content)
  362. assert.NotNil(t, putResp.ETag)
  363. getResp := getObject(t, client, bucketName, key)
  364. body := getObjectBody(t, getResp)
  365. assert.Equal(t, len(content), len(body))
  366. assert.Equal(t, content, body)
  367. assert.Equal(t, putResp.ETag, getResp.ETag)
  368. })
  369. }
  370. }
// TestObjectCopySameBucket tests copying an object within the same bucket
// and verifies the destination carries the source's content.
func TestObjectCopySameBucket(t *testing.T) {
	client := getS3Client(t)

	// Wait for S3 service to be ready
	waitForS3Service(t, client, 30*time.Second)

	bucketName := getNewBucketName()

	// Create bucket
	createBucket(t, client, bucketName)
	defer deleteBucket(t, client, bucketName)

	// Put source object
	sourceKey := "foo123bar"
	sourceContent := "foo"
	putObject(t, client, bucketName, sourceKey, sourceContent)

	// Copy object within the same bucket
	destKey := "bar321foo"
	copySource := createCopySource(bucketName, sourceKey)
	_, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
		Bucket:     aws.String(bucketName),
		Key:        aws.String(destKey),
		CopySource: aws.String(copySource),
	})
	require.NoError(t, err, "Failed to copy object within same bucket")

	// Verify the copied object matches the source content
	resp := getObject(t, client, bucketName, destKey)
	body := getObjectBody(t, resp)
	assert.Equal(t, sourceContent, body)
}
// TestObjectCopyDiffBucket tests copying an object to a different bucket
// and verifies the destination carries the source's content.
func TestObjectCopyDiffBucket(t *testing.T) {
	client := getS3Client(t)
	sourceBucketName := getNewBucketName()
	destBucketName := getNewBucketName()

	// Create buckets
	createBucket(t, client, sourceBucketName)
	defer deleteBucket(t, client, sourceBucketName)
	createBucket(t, client, destBucketName)
	defer deleteBucket(t, client, destBucketName)

	// Put source object
	sourceKey := "foo123bar"
	sourceContent := "foo"
	putObject(t, client, sourceBucketName, sourceKey, sourceContent)

	// Copy object to different bucket
	destKey := "bar321foo"
	copySource := createCopySource(sourceBucketName, sourceKey)
	_, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
		Bucket:     aws.String(destBucketName),
		Key:        aws.String(destKey),
		CopySource: aws.String(copySource),
	})
	require.NoError(t, err)

	// Verify the copied object matches the source content
	resp := getObject(t, client, destBucketName, destKey)
	body := getObjectBody(t, resp)
	assert.Equal(t, sourceContent, body)
}
// TestObjectCopyCannedAcl tests copying with ACL settings: first a plain
// copy with a public-read canned ACL, then a copy combining the ACL with a
// metadata REPLACE directive.
func TestObjectCopyCannedAcl(t *testing.T) {
	client := getS3Client(t)
	bucketName := getNewBucketName()

	// Create bucket
	createBucket(t, client, bucketName)
	defer deleteBucket(t, client, bucketName)

	// Put source object
	sourceKey := "foo123bar"
	sourceContent := "foo"
	putObject(t, client, bucketName, sourceKey, sourceContent)

	// Copy object with public-read ACL
	destKey := "bar321foo"
	copySource := createCopySource(bucketName, sourceKey)
	_, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
		Bucket:     aws.String(bucketName),
		Key:        aws.String(destKey),
		CopySource: aws.String(copySource),
		ACL:        types.ObjectCannedACLPublicRead,
	})
	require.NoError(t, err)

	// Verify the copied object
	resp := getObject(t, client, bucketName, destKey)
	body := getObjectBody(t, resp)
	assert.Equal(t, sourceContent, body)

	// Test metadata replacement with ACL: REPLACE stores the supplied
	// metadata on the destination while the content is copied unchanged.
	// Note: the source here is the first copy's destination (destKey).
	metadata := map[string]string{"abc": "def"}
	destKey2 := "foo123bar2"
	copySource2 := createCopySource(bucketName, destKey)
	_, err = client.CopyObject(context.TODO(), &s3.CopyObjectInput{
		Bucket:            aws.String(bucketName),
		Key:               aws.String(destKey2),
		CopySource:        aws.String(copySource2),
		ACL:               types.ObjectCannedACLPublicRead,
		Metadata:          metadata,
		MetadataDirective: types.MetadataDirectiveReplace,
	})
	require.NoError(t, err)

	// Verify the copied object carries the replacement metadata
	resp2 := getObject(t, client, bucketName, destKey2)
	body2 := getObjectBody(t, resp2)
	assert.Equal(t, sourceContent, body2)
	assert.Equal(t, metadata, resp2.Metadata)
}
// TestObjectCopyRetainingMetadata tests copying while retaining metadata:
// with no MetadataDirective, the destination should keep the source's
// content type and user metadata.
func TestObjectCopyRetainingMetadata(t *testing.T) {
	client := getS3Client(t)
	bucketName := getNewBucketName()

	// Create bucket
	createBucket(t, client, bucketName)
	defer deleteBucket(t, client, bucketName)

	// Test with different sizes
	sizes := []int{3, 1024 * 1024} // 3 bytes and 1MB
	for _, size := range sizes {
		t.Run(fmt.Sprintf("size_%d", size), func(t *testing.T) {
			sourceKey := fmt.Sprintf("foo123bar_%d", size)
			sourceContent := string(generateRandomData(size))
			contentType := "audio/ogg"
			metadata := map[string]string{"key1": "value1", "key2": "value2"}

			// Put source object with metadata
			putObjectWithMetadata(t, client, bucketName, sourceKey, sourceContent, metadata, contentType)

			// Copy object (no MetadataDirective — should retain metadata)
			destKey := fmt.Sprintf("bar321foo_%d", size)
			copySource := createCopySource(bucketName, sourceKey)
			_, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
				Bucket:     aws.String(bucketName),
				Key:        aws.String(destKey),
				CopySource: aws.String(copySource),
			})
			require.NoError(t, err)

			// Verify content, content type, metadata, and length all carried over
			resp := getObject(t, client, bucketName, destKey)
			body := getObjectBody(t, resp)
			assert.Equal(t, sourceContent, body)
			assert.Equal(t, contentType, *resp.ContentType)
			assert.Equal(t, metadata, resp.Metadata)
			require.NotNil(t, resp.ContentLength)
			assert.Equal(t, int64(size), *resp.ContentLength)
		})
	}
}
// TestMultipartCopySmall tests multipart copying of small files: a single
// 1-byte part is copied via UploadPartCopy with an explicit bytes=0-0 range.
func TestMultipartCopySmall(t *testing.T) {
	client := getS3Client(t)

	// Clean up any leftover buckets from previous test runs
	cleanupTestBuckets(t, client)

	sourceBucketName := getNewBucketName()
	destBucketName := getNewBucketName()

	// Create buckets
	createBucket(t, client, sourceBucketName)
	defer deleteBucket(t, client, sourceBucketName)
	createBucket(t, client, destBucketName)
	defer deleteBucket(t, client, destBucketName)

	// Put source object
	sourceKey := "foo"
	sourceContent := "x" // 1 byte
	putObject(t, client, sourceBucketName, sourceKey, sourceContent)

	// Create multipart upload
	destKey := "mymultipart"
	createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{
		Bucket: aws.String(destBucketName),
		Key:    aws.String(destKey),
	})
	require.NoError(t, err)
	uploadID := *createResp.UploadId

	// Upload part copy covering the single source byte (bytes=0-0)
	copySource := createCopySource(sourceBucketName, sourceKey)
	copyResp, err := client.UploadPartCopy(context.TODO(), &s3.UploadPartCopyInput{
		Bucket:          aws.String(destBucketName),
		Key:             aws.String(destKey),
		UploadId:        aws.String(uploadID),
		PartNumber:      aws.Int32(1),
		CopySource:      aws.String(copySource),
		CopySourceRange: aws.String("bytes=0-0"),
	})
	require.NoError(t, err)

	// Complete multipart upload with the single copied part
	_, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{
		Bucket:   aws.String(destBucketName),
		Key:      aws.String(destKey),
		UploadId: aws.String(uploadID),
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: []types.CompletedPart{
				{
					ETag:       copyResp.CopyPartResult.ETag,
					PartNumber: aws.Int32(1),
				},
			},
		},
	})
	require.NoError(t, err)

	// Verify the copied object content and length
	resp := getObject(t, client, destBucketName, destKey)
	body := getObjectBody(t, resp)
	assert.Equal(t, sourceContent, body)
	require.NotNil(t, resp.ContentLength)
	assert.Equal(t, int64(1), *resp.ContentLength)
}
// TestMultipartCopyWithoutRange tests multipart copying without a range
// specification: UploadPartCopy with no CopySourceRange should copy the
// entire 10-byte source object into the single part.
func TestMultipartCopyWithoutRange(t *testing.T) {
	client := getS3Client(t)

	// Clean up any leftover buckets from previous test runs
	cleanupTestBuckets(t, client)

	sourceBucketName := getNewBucketName()
	destBucketName := getNewBucketName()

	// Create buckets
	createBucket(t, client, sourceBucketName)
	defer deleteBucket(t, client, sourceBucketName)
	createBucket(t, client, destBucketName)
	defer deleteBucket(t, client, destBucketName)

	// Put source object
	sourceKey := "source"
	sourceContent := string(generateRandomData(10))
	putObject(t, client, sourceBucketName, sourceKey, sourceContent)

	// Create multipart upload
	destKey := "mymultipartcopy"
	createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{
		Bucket: aws.String(destBucketName),
		Key:    aws.String(destKey),
	})
	require.NoError(t, err)
	uploadID := *createResp.UploadId

	// Upload part copy without range (should copy entire object)
	copySource := createCopySource(sourceBucketName, sourceKey)
	copyResp, err := client.UploadPartCopy(context.TODO(), &s3.UploadPartCopyInput{
		Bucket:     aws.String(destBucketName),
		Key:        aws.String(destKey),
		UploadId:   aws.String(uploadID),
		PartNumber: aws.Int32(1),
		CopySource: aws.String(copySource),
	})
	require.NoError(t, err)

	// Complete multipart upload with the single copied part
	_, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{
		Bucket:   aws.String(destBucketName),
		Key:      aws.String(destKey),
		UploadId: aws.String(uploadID),
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: []types.CompletedPart{
				{
					ETag:       copyResp.CopyPartResult.ETag,
					PartNumber: aws.Int32(1),
				},
			},
		},
	})
	require.NoError(t, err)

	// Verify the copied object content and full 10-byte length
	resp := getObject(t, client, destBucketName, destKey)
	body := getObjectBody(t, resp)
	assert.Equal(t, sourceContent, body)
	require.NotNil(t, resp.ContentLength)
	assert.Equal(t, int64(10), *resp.ContentLength)
}
// TestMultipartCopySpecialNames tests multipart copying with special
// character source key names (space, underscores, a "?versionId" lookalike)
// that exercise copy-source URL encoding. The destination key is reused
// across subtests, which run sequentially.
func TestMultipartCopySpecialNames(t *testing.T) {
	client := getS3Client(t)

	// Clean up any leftover buckets from previous test runs
	cleanupTestBuckets(t, client)

	sourceBucketName := getNewBucketName()
	destBucketName := getNewBucketName()

	// Create buckets
	createBucket(t, client, sourceBucketName)
	defer deleteBucket(t, client, sourceBucketName)
	createBucket(t, client, destBucketName)
	defer deleteBucket(t, client, destBucketName)

	// Test with special key names
	specialKeys := []string{" ", "_", "__", "?versionId"}
	sourceContent := "x" // 1 byte
	destKey := "mymultipart"

	for i, sourceKey := range specialKeys {
		t.Run(fmt.Sprintf("special_key_%d", i), func(t *testing.T) {
			// Put source object under the special-character key
			putObject(t, client, sourceBucketName, sourceKey, sourceContent)

			// Create multipart upload
			createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{
				Bucket: aws.String(destBucketName),
				Key:    aws.String(destKey),
			})
			require.NoError(t, err)
			uploadID := *createResp.UploadId

			// Upload part copy of the single source byte
			copySource := createCopySource(sourceBucketName, sourceKey)
			copyResp, err := client.UploadPartCopy(context.TODO(), &s3.UploadPartCopyInput{
				Bucket:          aws.String(destBucketName),
				Key:             aws.String(destKey),
				UploadId:        aws.String(uploadID),
				PartNumber:      aws.Int32(1),
				CopySource:      aws.String(copySource),
				CopySourceRange: aws.String("bytes=0-0"),
			})
			require.NoError(t, err)

			// Complete multipart upload
			_, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{
				Bucket:   aws.String(destBucketName),
				Key:      aws.String(destKey),
				UploadId: aws.String(uploadID),
				MultipartUpload: &types.CompletedMultipartUpload{
					Parts: []types.CompletedPart{
						{
							ETag:       copyResp.CopyPartResult.ETag,
							PartNumber: aws.Int32(1),
						},
					},
				},
			})
			require.NoError(t, err)

			// Verify the copied object content and length
			resp := getObject(t, client, destBucketName, destKey)
			body := getObjectBody(t, resp)
			assert.Equal(t, sourceContent, body)
			require.NotNil(t, resp.ContentLength)
			assert.Equal(t, int64(1), *resp.ContentLength)
		})
	}
}
// TestMultipartCopyMultipleSizes tests multipart copying with various total
// sizes sliced out of a single 12MB source object. Each subtest copies the
// leading `size` bytes in 5MB ranged parts (last part may be shorter) and
// verifies the assembled object matches the source prefix byte-for-byte.
func TestMultipartCopyMultipleSizes(t *testing.T) {
	client := getS3Client(t)

	// Clean up any leftover buckets from previous test runs
	cleanupTestBuckets(t, client)

	sourceBucketName := getNewBucketName()
	destBucketName := getNewBucketName()

	// Create buckets
	createBucket(t, client, sourceBucketName)
	defer deleteBucket(t, client, sourceBucketName)
	createBucket(t, client, destBucketName)
	defer deleteBucket(t, client, destBucketName)

	// Put source object (12MB)
	sourceKey := "foo"
	sourceSize := 12 * 1024 * 1024
	sourceContent := generateRandomData(sourceSize)
	_, err := client.PutObject(context.TODO(), &s3.PutObjectInput{
		Bucket: aws.String(sourceBucketName),
		Key:    aws.String(sourceKey),
		Body:   bytes.NewReader(sourceContent),
	})
	require.NoError(t, err)

	destKey := "mymultipart"
	partSize := 5 * 1024 * 1024 // 5MB parts

	// Test different copy sizes around the part-size boundaries
	testSizes := []int{
		5 * 1024 * 1024,         // 5MB
		5*1024*1024 + 100*1024,  // 5MB + 100KB
		5*1024*1024 + 600*1024,  // 5MB + 600KB
		10*1024*1024 + 100*1024, // 10MB + 100KB
		10*1024*1024 + 600*1024, // 10MB + 600KB
		10 * 1024 * 1024,        // 10MB
	}

	for _, size := range testSizes {
		t.Run(fmt.Sprintf("size_%d", size), func(t *testing.T) {
			// Create multipart upload
			createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{
				Bucket: aws.String(destBucketName),
				Key:    aws.String(destKey),
			})
			require.NoError(t, err)
			uploadID := *createResp.UploadId

			// Upload parts: one ranged copy per partSize window, clamping the
			// final range end to size-1 (byte ranges are inclusive).
			var parts []types.CompletedPart
			copySource := createCopySource(sourceBucketName, sourceKey)
			for i := 0; i < size; i += partSize {
				partNum := int32(len(parts) + 1)
				endOffset := i + partSize - 1
				if endOffset >= size {
					endOffset = size - 1
				}
				copyRange := fmt.Sprintf("bytes=%d-%d", i, endOffset)

				copyResp, err := client.UploadPartCopy(context.TODO(), &s3.UploadPartCopyInput{
					Bucket:          aws.String(destBucketName),
					Key:             aws.String(destKey),
					UploadId:        aws.String(uploadID),
					PartNumber:      aws.Int32(partNum),
					CopySource:      aws.String(copySource),
					CopySourceRange: aws.String(copyRange),
				})
				require.NoError(t, err)

				parts = append(parts, types.CompletedPart{
					ETag:       copyResp.CopyPartResult.ETag,
					PartNumber: aws.Int32(partNum),
				})
			}

			// Complete multipart upload
			_, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{
				Bucket:   aws.String(destBucketName),
				Key:      aws.String(destKey),
				UploadId: aws.String(uploadID),
				MultipartUpload: &types.CompletedMultipartUpload{
					Parts: parts,
				},
			})
			require.NoError(t, err)

			// Verify the copied object equals the first `size` source bytes
			resp := getObject(t, client, destBucketName, destKey)
			body, err := io.ReadAll(resp.Body)
			require.NoError(t, err)
			resp.Body.Close()
			require.NotNil(t, resp.ContentLength)
			assert.Equal(t, int64(size), *resp.ContentLength)
			assert.Equal(t, sourceContent[:size], body)
		})
	}
}
  769. // TestCopyObjectIfMatchGood tests copying with matching ETag condition
  770. func TestCopyObjectIfMatchGood(t *testing.T) {
  771. client := getS3Client(t)
  772. bucketName := getNewBucketName()
  773. // Create bucket
  774. createBucket(t, client, bucketName)
  775. defer deleteBucket(t, client, bucketName)
  776. // Put source object
  777. sourceKey := "foo"
  778. sourceContent := "bar"
  779. putResp := putObject(t, client, bucketName, sourceKey, sourceContent)
  780. // Copy object with matching ETag
  781. destKey := "bar"
  782. copySource := createCopySource(bucketName, sourceKey)
  783. _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
  784. Bucket: aws.String(bucketName),
  785. Key: aws.String(destKey),
  786. CopySource: aws.String(copySource),
  787. CopySourceIfMatch: putResp.ETag,
  788. })
  789. require.NoError(t, err)
  790. // Verify the copied object
  791. resp := getObject(t, client, bucketName, destKey)
  792. body := getObjectBody(t, resp)
  793. assert.Equal(t, sourceContent, body)
  794. }
  795. // TestCopyObjectIfNoneMatchFailed tests copying with non-matching ETag condition
  796. func TestCopyObjectIfNoneMatchFailed(t *testing.T) {
  797. client := getS3Client(t)
  798. bucketName := getNewBucketName()
  799. // Create bucket
  800. createBucket(t, client, bucketName)
  801. defer deleteBucket(t, client, bucketName)
  802. // Put source object
  803. sourceKey := "foo"
  804. sourceContent := "bar"
  805. putObject(t, client, bucketName, sourceKey, sourceContent)
  806. // Copy object with non-matching ETag (should succeed)
  807. destKey := "bar"
  808. copySource := createCopySource(bucketName, sourceKey)
  809. _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
  810. Bucket: aws.String(bucketName),
  811. Key: aws.String(destKey),
  812. CopySource: aws.String(copySource),
  813. CopySourceIfNoneMatch: aws.String("ABCORZ"),
  814. })
  815. require.NoError(t, err)
  816. // Verify the copied object
  817. resp := getObject(t, client, bucketName, destKey)
  818. body := getObjectBody(t, resp)
  819. assert.Equal(t, sourceContent, body)
  820. }
  821. // TestCopyObjectIfMatchFailed tests copying with non-matching ETag condition (should fail)
  822. func TestCopyObjectIfMatchFailed(t *testing.T) {
  823. client := getS3Client(t)
  824. bucketName := getNewBucketName()
  825. // Create bucket
  826. createBucket(t, client, bucketName)
  827. defer deleteBucket(t, client, bucketName)
  828. // Put source object
  829. sourceKey := "foo"
  830. sourceContent := "bar"
  831. putObject(t, client, bucketName, sourceKey, sourceContent)
  832. // Copy object with non-matching ETag (should fail)
  833. destKey := "bar"
  834. copySource := createCopySource(bucketName, sourceKey)
  835. _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
  836. Bucket: aws.String(bucketName),
  837. Key: aws.String(destKey),
  838. CopySource: aws.String(copySource),
  839. CopySourceIfMatch: aws.String("ABCORZ"),
  840. })
  841. // Should fail with precondition failed
  842. require.Error(t, err)
  843. // Note: We could check for specific error types, but SeaweedFS might return different error codes
  844. }
  845. // TestCopyObjectIfNoneMatchGood tests copying with matching ETag condition (should fail)
  846. func TestCopyObjectIfNoneMatchGood(t *testing.T) {
  847. client := getS3Client(t)
  848. bucketName := getNewBucketName()
  849. // Create bucket
  850. createBucket(t, client, bucketName)
  851. defer deleteBucket(t, client, bucketName)
  852. // Put source object
  853. sourceKey := "foo"
  854. sourceContent := "bar"
  855. putResp := putObject(t, client, bucketName, sourceKey, sourceContent)
  856. // Copy object with matching ETag for IfNoneMatch (should fail)
  857. destKey := "bar"
  858. copySource := createCopySource(bucketName, sourceKey)
  859. _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{
  860. Bucket: aws.String(bucketName),
  861. Key: aws.String(destKey),
  862. CopySource: aws.String(copySource),
  863. CopySourceIfNoneMatch: putResp.ETag,
  864. })
  865. // Should fail with precondition failed
  866. require.Error(t, err)
  867. }