// s3api_object_handlers_multipart.go
  1. package s3api
  2. import (
  3. "crypto/rand"
  4. "crypto/sha1"
  5. "encoding/base64"
  6. "encoding/json"
  7. "encoding/xml"
  8. "errors"
  9. "fmt"
  10. "io"
  11. "net/http"
  12. "net/url"
  13. "strconv"
  14. "strings"
  15. "github.com/aws/aws-sdk-go/aws"
  16. "github.com/aws/aws-sdk-go/service/s3"
  17. "github.com/google/uuid"
  18. "github.com/seaweedfs/seaweedfs/weed/glog"
  19. "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
  20. "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
  21. "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
  22. weed_server "github.com/seaweedfs/seaweedfs/weed/server"
  23. stats_collect "github.com/seaweedfs/seaweedfs/weed/stats"
  24. )
// Listing limits enforced by the multipart-related handlers.
const (
	maxObjectListSizeLimit = 1000  // Limit number of objects in a listObjectsResponse.
	maxUploadsList         = 10000 // Limit number of uploads in a listUploadsResponse.
	maxPartsList           = 10000 // Limit number of parts in a listPartsResponse.
)
  30. // NewMultipartUploadHandler - New multipart upload.
  31. func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
  32. bucket, object := s3_constants.GetBucketAndObject(r)
  33. // Check if bucket exists before creating multipart upload
  34. if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
  35. s3err.WriteErrorResponse(w, r, err)
  36. return
  37. }
  38. // Check if versioning is enabled for the bucket (needed for object lock)
  39. versioningEnabled, err := s3a.isVersioningEnabled(bucket)
  40. if err != nil {
  41. if errors.Is(err, filer_pb.ErrNotFound) {
  42. s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
  43. return
  44. }
  45. glog.Errorf("Error checking versioning status for bucket %s: %v", bucket, err)
  46. s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
  47. return
  48. }
  49. // Validate object lock headers before processing
  50. if err := s3a.validateObjectLockHeaders(r, versioningEnabled); err != nil {
  51. glog.V(2).Infof("NewMultipartUploadHandler: object lock header validation failed for bucket %s, object %s: %v", bucket, object, err)
  52. s3err.WriteErrorResponse(w, r, mapValidationErrorToS3Error(err))
  53. return
  54. }
  55. createMultipartUploadInput := &s3.CreateMultipartUploadInput{
  56. Bucket: aws.String(bucket),
  57. Key: objectKey(aws.String(object)),
  58. Metadata: make(map[string]*string),
  59. }
  60. metadata := weed_server.SaveAmzMetaData(r, nil, false)
  61. for k, v := range metadata {
  62. createMultipartUploadInput.Metadata[k] = aws.String(string(v))
  63. }
  64. contentType := r.Header.Get("Content-Type")
  65. if contentType != "" {
  66. createMultipartUploadInput.ContentType = &contentType
  67. }
  68. response, errCode := s3a.createMultipartUpload(r, createMultipartUploadInput)
  69. glog.V(3).Info("NewMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)), errCode)
  70. if errCode != s3err.ErrNone {
  71. s3err.WriteErrorResponse(w, r, errCode)
  72. return
  73. }
  74. writeSuccessResponseXML(w, r, response)
  75. }
  76. // CompleteMultipartUploadHandler - Completes multipart upload.
  77. func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
  78. // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
  79. bucket, object := s3_constants.GetBucketAndObject(r)
  80. // Check if bucket exists before completing multipart upload
  81. if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
  82. s3err.WriteErrorResponse(w, r, err)
  83. return
  84. }
  85. parts := &CompleteMultipartUpload{}
  86. if err := xmlDecoder(r.Body, parts, r.ContentLength); err != nil {
  87. s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML)
  88. return
  89. }
  90. // Get upload id.
  91. uploadID, _, _, _ := getObjectResources(r.URL.Query())
  92. err := s3a.checkUploadId(object, uploadID)
  93. if err != nil {
  94. s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchUpload)
  95. return
  96. }
  97. // Check conditional headers before completing multipart upload
  98. // This implements AWS S3 behavior where conditional headers apply to CompleteMultipartUpload
  99. if errCode := s3a.checkConditionalHeaders(r, bucket, object); errCode != s3err.ErrNone {
  100. glog.V(3).Infof("CompleteMultipartUploadHandler: Conditional header check failed for %s/%s", bucket, object)
  101. s3err.WriteErrorResponse(w, r, errCode)
  102. return
  103. }
  104. response, errCode := s3a.completeMultipartUpload(r, &s3.CompleteMultipartUploadInput{
  105. Bucket: aws.String(bucket),
  106. Key: objectKey(aws.String(object)),
  107. UploadId: aws.String(uploadID),
  108. }, parts)
  109. glog.V(3).Info("CompleteMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)), errCode)
  110. if errCode != s3err.ErrNone {
  111. s3err.WriteErrorResponse(w, r, errCode)
  112. return
  113. }
  114. // Set version ID in HTTP header if present
  115. if response.VersionId != nil {
  116. w.Header().Set("x-amz-version-id", *response.VersionId)
  117. }
  118. stats_collect.RecordBucketActiveTime(bucket)
  119. stats_collect.S3UploadedObjectsCounter.WithLabelValues(bucket).Inc()
  120. writeSuccessResponseXML(w, r, response)
  121. }
  122. // AbortMultipartUploadHandler - Aborts multipart upload.
  123. func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
  124. bucket, object := s3_constants.GetBucketAndObject(r)
  125. // Check if bucket exists before aborting multipart upload
  126. if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
  127. s3err.WriteErrorResponse(w, r, err)
  128. return
  129. }
  130. // Get upload id.
  131. uploadID, _, _, _ := getObjectResources(r.URL.Query())
  132. err := s3a.checkUploadId(object, uploadID)
  133. if err != nil {
  134. s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchUpload)
  135. return
  136. }
  137. response, errCode := s3a.abortMultipartUpload(&s3.AbortMultipartUploadInput{
  138. Bucket: aws.String(bucket),
  139. Key: objectKey(aws.String(object)),
  140. UploadId: aws.String(uploadID),
  141. })
  142. if errCode != s3err.ErrNone {
  143. s3err.WriteErrorResponse(w, r, errCode)
  144. return
  145. }
  146. glog.V(3).Info("AbortMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)))
  147. //https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
  148. s3err.WriteEmptyResponse(w, r, http.StatusNoContent)
  149. s3err.PostLog(r, http.StatusNoContent, s3err.ErrNone)
  150. }
  151. // ListMultipartUploadsHandler - Lists multipart uploads.
  152. func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
  153. bucket, _ := s3_constants.GetBucketAndObject(r)
  154. // Check if bucket exists before listing multipart uploads
  155. if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
  156. s3err.WriteErrorResponse(w, r, err)
  157. return
  158. }
  159. prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType := getBucketMultipartResources(r.URL.Query())
  160. if maxUploads < 0 {
  161. s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxUploads)
  162. return
  163. }
  164. if keyMarker != "" {
  165. // Marker not common with prefix is not implemented.
  166. if !strings.HasPrefix(keyMarker, prefix) {
  167. s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)
  168. return
  169. }
  170. }
  171. response, errCode := s3a.listMultipartUploads(&s3.ListMultipartUploadsInput{
  172. Bucket: aws.String(bucket),
  173. Delimiter: aws.String(delimiter),
  174. EncodingType: aws.String(encodingType),
  175. KeyMarker: aws.String(keyMarker),
  176. MaxUploads: aws.Int64(int64(maxUploads)),
  177. Prefix: aws.String(prefix),
  178. UploadIdMarker: aws.String(uploadIDMarker),
  179. })
  180. glog.V(3).Infof("ListMultipartUploadsHandler %s errCode=%d", string(s3err.EncodeXMLResponse(response)), errCode)
  181. if errCode != s3err.ErrNone {
  182. s3err.WriteErrorResponse(w, r, errCode)
  183. return
  184. }
  185. // TODO handle encodingType
  186. writeSuccessResponseXML(w, r, response)
  187. }
  188. // ListObjectPartsHandler - Lists object parts in a multipart upload.
  189. func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) {
  190. bucket, object := s3_constants.GetBucketAndObject(r)
  191. // Check if bucket exists before listing object parts
  192. if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
  193. s3err.WriteErrorResponse(w, r, err)
  194. return
  195. }
  196. uploadID, partNumberMarker, maxParts, _ := getObjectResources(r.URL.Query())
  197. if partNumberMarker < 0 {
  198. s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPartNumberMarker)
  199. return
  200. }
  201. if maxParts < 0 {
  202. s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxParts)
  203. return
  204. }
  205. err := s3a.checkUploadId(object, uploadID)
  206. if err != nil {
  207. s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchUpload)
  208. return
  209. }
  210. response, errCode := s3a.listObjectParts(&s3.ListPartsInput{
  211. Bucket: aws.String(bucket),
  212. Key: objectKey(aws.String(object)),
  213. MaxParts: aws.Int64(int64(maxParts)),
  214. PartNumberMarker: aws.Int64(int64(partNumberMarker)),
  215. UploadId: aws.String(uploadID),
  216. })
  217. if errCode != s3err.ErrNone {
  218. s3err.WriteErrorResponse(w, r, errCode)
  219. return
  220. }
  221. glog.V(3).Infof("ListObjectPartsHandler %s count=%d", string(s3err.EncodeXMLResponse(response)), len(response.Part))
  222. writeSuccessResponseXML(w, r, response)
  223. }
  224. // PutObjectPartHandler - Put an object part in a multipart upload.
  225. func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
  226. bucket, object := s3_constants.GetBucketAndObject(r)
  227. // Check if bucket exists before putting object part
  228. if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
  229. s3err.WriteErrorResponse(w, r, err)
  230. return
  231. }
  232. uploadID := r.URL.Query().Get("uploadId")
  233. err := s3a.checkUploadId(object, uploadID)
  234. if err != nil {
  235. s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchUpload)
  236. return
  237. }
  238. partIDString := r.URL.Query().Get("partNumber")
  239. partID, err := strconv.Atoi(partIDString)
  240. if err != nil {
  241. s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart)
  242. return
  243. }
  244. if partID > s3_constants.MaxS3MultipartParts {
  245. s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart)
  246. return
  247. }
  248. if partID < 1 {
  249. s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart)
  250. return
  251. }
  252. dataReader, s3ErrCode := getRequestDataReader(s3a, r)
  253. if s3ErrCode != s3err.ErrNone {
  254. s3err.WriteErrorResponse(w, r, s3ErrCode)
  255. return
  256. }
  257. defer dataReader.Close()
  258. glog.V(2).Infof("PutObjectPartHandler %s %s %04d", bucket, uploadID, partID)
  259. // Check for SSE-C headers in the current request first
  260. sseCustomerAlgorithm := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm)
  261. if sseCustomerAlgorithm != "" {
  262. glog.Infof("PutObjectPartHandler: detected SSE-C headers, handling as SSE-C part upload")
  263. // SSE-C part upload - headers are already present, let putToFiler handle it
  264. } else {
  265. // No SSE-C headers, check for SSE-KMS settings from upload directory
  266. glog.Infof("PutObjectPartHandler: attempting to retrieve upload entry for bucket %s, uploadID %s", bucket, uploadID)
  267. if uploadEntry, err := s3a.getEntry(s3a.genUploadsFolder(bucket), uploadID); err == nil {
  268. glog.Infof("PutObjectPartHandler: upload entry found, Extended metadata: %v", uploadEntry.Extended != nil)
  269. if uploadEntry.Extended != nil {
  270. // Check if this upload uses SSE-KMS
  271. glog.Infof("PutObjectPartHandler: checking for SSE-KMS key in extended metadata")
  272. if keyIDBytes, exists := uploadEntry.Extended[s3_constants.SeaweedFSSSEKMSKeyID]; exists {
  273. keyID := string(keyIDBytes)
  274. // Build SSE-KMS metadata for this part
  275. bucketKeyEnabled := false
  276. if bucketKeyBytes, exists := uploadEntry.Extended[s3_constants.SeaweedFSSSEKMSBucketKeyEnabled]; exists && string(bucketKeyBytes) == "true" {
  277. bucketKeyEnabled = true
  278. }
  279. var encryptionContext map[string]string
  280. if contextBytes, exists := uploadEntry.Extended[s3_constants.SeaweedFSSSEKMSEncryptionContext]; exists {
  281. // Parse the stored encryption context
  282. if err := json.Unmarshal(contextBytes, &encryptionContext); err != nil {
  283. glog.Errorf("Failed to parse encryption context for upload %s: %v", uploadID, err)
  284. encryptionContext = BuildEncryptionContext(bucket, object, bucketKeyEnabled)
  285. }
  286. } else {
  287. encryptionContext = BuildEncryptionContext(bucket, object, bucketKeyEnabled)
  288. }
  289. // Get the base IV for this multipart upload
  290. var baseIV []byte
  291. if baseIVBytes, exists := uploadEntry.Extended[s3_constants.SeaweedFSSSEKMSBaseIV]; exists {
  292. // Decode the base64 encoded base IV
  293. decodedIV, decodeErr := base64.StdEncoding.DecodeString(string(baseIVBytes))
  294. if decodeErr == nil && len(decodedIV) == 16 {
  295. baseIV = decodedIV
  296. glog.V(4).Infof("Using stored base IV %x for multipart upload %s", baseIV[:8], uploadID)
  297. } else {
  298. glog.Errorf("Failed to decode base IV for multipart upload %s: %v", uploadID, decodeErr)
  299. }
  300. }
  301. if len(baseIV) == 0 {
  302. glog.Errorf("No valid base IV found for SSE-KMS multipart upload %s", uploadID)
  303. // Generate a new base IV as fallback
  304. baseIV = make([]byte, 16)
  305. if _, err := rand.Read(baseIV); err != nil {
  306. glog.Errorf("Failed to generate fallback base IV: %v", err)
  307. }
  308. }
  309. // Add SSE-KMS headers to the request for putToFiler to handle encryption
  310. r.Header.Set(s3_constants.AmzServerSideEncryption, "aws:kms")
  311. r.Header.Set(s3_constants.AmzServerSideEncryptionAwsKmsKeyId, keyID)
  312. if bucketKeyEnabled {
  313. r.Header.Set(s3_constants.AmzServerSideEncryptionBucketKeyEnabled, "true")
  314. }
  315. if len(encryptionContext) > 0 {
  316. if contextJSON, err := json.Marshal(encryptionContext); err == nil {
  317. r.Header.Set(s3_constants.AmzServerSideEncryptionContext, base64.StdEncoding.EncodeToString(contextJSON))
  318. }
  319. }
  320. // Pass the base IV to putToFiler via header
  321. r.Header.Set(s3_constants.SeaweedFSSSEKMSBaseIVHeader, base64.StdEncoding.EncodeToString(baseIV))
  322. glog.Infof("PutObjectPartHandler: inherited SSE-KMS settings from upload %s, keyID %s - letting putToFiler handle encryption", uploadID, keyID)
  323. } else {
  324. // Check if this upload uses SSE-S3
  325. if err := s3a.handleSSES3MultipartHeaders(r, uploadEntry, uploadID); err != nil {
  326. glog.Errorf("Failed to setup SSE-S3 multipart headers: %v", err)
  327. s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
  328. return
  329. }
  330. }
  331. }
  332. } else {
  333. glog.Infof("PutObjectPartHandler: failed to retrieve upload entry: %v", err)
  334. }
  335. }
  336. uploadUrl := s3a.genPartUploadUrl(bucket, uploadID, partID)
  337. if partID == 1 && r.Header.Get("Content-Type") == "" {
  338. dataReader = mimeDetect(r, dataReader)
  339. }
  340. destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)
  341. etag, errCode, _ := s3a.putToFiler(r, uploadUrl, dataReader, destination, bucket, partID)
  342. if errCode != s3err.ErrNone {
  343. s3err.WriteErrorResponse(w, r, errCode)
  344. return
  345. }
  346. setEtag(w, etag)
  347. writeSuccessResponseEmpty(w, r)
  348. }
  349. func (s3a *S3ApiServer) genUploadsFolder(bucket string) string {
  350. return fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, s3_constants.MultipartUploadsFolder)
  351. }
  352. func (s3a *S3ApiServer) genPartUploadUrl(bucket, uploadID string, partID int) string {
  353. return fmt.Sprintf("http://%s%s/%s/%04d_%s.part",
  354. s3a.option.Filer.ToHttpAddress(), s3a.genUploadsFolder(bucket), uploadID, partID, uuid.NewString())
  355. }
  356. // Generate uploadID hash string from object
  357. func (s3a *S3ApiServer) generateUploadID(object string) string {
  358. object = strings.TrimPrefix(object, "/")
  359. h := sha1.New()
  360. h.Write([]byte(object))
  361. return fmt.Sprintf("%x", h.Sum(nil))
  362. }
  363. // Check object name and uploadID when processing multipart uploading
  364. func (s3a *S3ApiServer) checkUploadId(object string, id string) error {
  365. hash := s3a.generateUploadID(object)
  366. if !strings.HasPrefix(id, hash) {
  367. glog.Errorf("object %s and uploadID %s are not matched", object, id)
  368. return fmt.Errorf("object %s and uploadID %s are not matched", object, id)
  369. }
  370. return nil
  371. }
  372. // Parse bucket url queries for ?uploads
  373. func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int, encodingType string) {
  374. prefix = values.Get("prefix")
  375. keyMarker = values.Get("key-marker")
  376. uploadIDMarker = values.Get("upload-id-marker")
  377. delimiter = values.Get("delimiter")
  378. if values.Get("max-uploads") != "" {
  379. maxUploads, _ = strconv.Atoi(values.Get("max-uploads"))
  380. } else {
  381. maxUploads = maxUploadsList
  382. }
  383. encodingType = values.Get("encoding-type")
  384. return
  385. }
  386. // Parse object url queries
  387. func getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string) {
  388. uploadID = values.Get("uploadId")
  389. partNumberMarker, _ = strconv.Atoi(values.Get("part-number-marker"))
  390. if values.Get("max-parts") != "" {
  391. maxParts, _ = strconv.Atoi(values.Get("max-parts"))
  392. } else {
  393. maxParts = maxPartsList
  394. }
  395. encodingType = values.Get("encoding-type")
  396. return
  397. }
  398. func xmlDecoder(body io.Reader, v interface{}, size int64) error {
  399. var lbody io.Reader
  400. if size > 0 {
  401. lbody = io.LimitReader(body, size)
  402. } else {
  403. lbody = body
  404. }
  405. d := xml.NewDecoder(lbody)
  406. d.CharsetReader = func(label string, input io.Reader) (io.Reader, error) {
  407. return input, nil
  408. }
  409. return d.Decode(v)
  410. }
// CompleteMultipartUpload is the XML request body of the
// CompleteMultipartUpload API call, listing the parts that
// make up the final object.
type CompleteMultipartUpload struct {
	Parts []CompletedPart `xml:"Part"`
}

// CompletedPart identifies one uploaded part by its ETag and part number.
type CompletedPart struct {
	ETag       string
	PartNumber int
}
  418. // handleSSES3MultipartHeaders handles SSE-S3 multipart upload header setup to reduce nesting complexity
  419. func (s3a *S3ApiServer) handleSSES3MultipartHeaders(r *http.Request, uploadEntry *filer_pb.Entry, uploadID string) error {
  420. glog.Infof("PutObjectPartHandler: checking for SSE-S3 settings in extended metadata")
  421. if encryptionTypeBytes, exists := uploadEntry.Extended[s3_constants.SeaweedFSSSES3Encryption]; exists && string(encryptionTypeBytes) == s3_constants.SSEAlgorithmAES256 {
  422. glog.Infof("PutObjectPartHandler: found SSE-S3 encryption type, setting up headers")
  423. // Set SSE-S3 headers to indicate server-side encryption
  424. r.Header.Set(s3_constants.AmzServerSideEncryption, s3_constants.SSEAlgorithmAES256)
  425. // Retrieve and set base IV for consistent multipart encryption - REQUIRED for security
  426. var baseIV []byte
  427. if baseIVBytes, exists := uploadEntry.Extended[s3_constants.SeaweedFSSSES3BaseIV]; exists {
  428. // Decode the base64 encoded base IV
  429. decodedIV, decodeErr := base64.StdEncoding.DecodeString(string(baseIVBytes))
  430. if decodeErr != nil {
  431. return fmt.Errorf("failed to decode base IV for SSE-S3 multipart upload %s: %v", uploadID, decodeErr)
  432. }
  433. if len(decodedIV) != s3_constants.AESBlockSize {
  434. return fmt.Errorf("invalid base IV length for SSE-S3 multipart upload %s: expected %d bytes, got %d", uploadID, s3_constants.AESBlockSize, len(decodedIV))
  435. }
  436. baseIV = decodedIV
  437. glog.V(4).Infof("Using stored base IV %x for SSE-S3 multipart upload %s", baseIV[:8], uploadID)
  438. } else {
  439. return fmt.Errorf("no base IV found for SSE-S3 multipart upload %s - required for encryption consistency", uploadID)
  440. }
  441. // Retrieve and set key data for consistent multipart encryption - REQUIRED for decryption
  442. if keyDataBytes, exists := uploadEntry.Extended[s3_constants.SeaweedFSSSES3KeyData]; exists {
  443. // Key data is already base64 encoded, pass it directly
  444. keyDataStr := string(keyDataBytes)
  445. r.Header.Set(s3_constants.SeaweedFSSSES3KeyDataHeader, keyDataStr)
  446. glog.V(4).Infof("Using stored key data for SSE-S3 multipart upload %s", uploadID)
  447. } else {
  448. return fmt.Errorf("no SSE-S3 key data found for multipart upload %s - required for encryption", uploadID)
  449. }
  450. // Pass the base IV to putToFiler via header for offset calculation
  451. r.Header.Set(s3_constants.SeaweedFSSSES3BaseIVHeader, base64.StdEncoding.EncodeToString(baseIV))
  452. glog.Infof("PutObjectPartHandler: inherited SSE-S3 settings from upload %s - letting putToFiler handle encryption", uploadID)
  453. }
  454. return nil
  455. }