@@ -28,6 +28,7 @@ import (
2828 "github.com/NVIDIA/aistore/core"
2929 "github.com/NVIDIA/aistore/core/meta"
3030 "github.com/NVIDIA/aistore/core/mock"
31+ "github.com/NVIDIA/aistore/sys"
3132 "github.com/NVIDIA/aistore/tools"
3233 "github.com/NVIDIA/aistore/tools/docker"
3334 "github.com/NVIDIA/aistore/tools/readers"
@@ -2173,6 +2174,125 @@ func TestMultipartUploadParallel(t *testing.T) {
 	tlog.Logfln("parallel multipart upload completed successfully with correct content ordering")
 }
 
+func TestMultipartMaxChunks(t *testing.T) {
+	tools.CheckSkip(t, &tools.SkipTestArgs{Long: true})
+
+	var (
+		proxyURL   = tools.RandomProxyURL(t)
+		baseParams = tools.BaseAPIParams(proxyURL)
+		bck        = cmn.Bck{
+			Name:     trand.String(10),
+			Provider: apc.AIS,
+		}
+		miniPartData = []byte("x") // Minimal 1-byte data per part
+	)
+
+	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)
+
+	t.Run("exceed-limit", func(t *testing.T) {
+		var (
+			objName  = "test-multipart-exceed-limit"
+			numParts = core.MaxChunkCount + 100 // Exceed limit by 100
+		)
+
+		uploadID, err := api.CreateMultipartUpload(baseParams, bck, objName)
+		tassert.CheckFatal(t, err)
+
+		err = uploadPartsInParallel(baseParams, objName, uploadID, numParts, bck, miniPartData)
+
+		// We expect an error because we're exceeding MaxChunkCount
+		tassert.Fatalf(t, err != nil, "expected error when exceeding MaxChunkCount, but upload succeeded")
+		herr := cmn.UnwrapErrHTTP(err)
+		tassert.Fatalf(t, herr != nil, "expected ErrHTTP, got %v", err)
+		tassert.Fatalf(t, strings.Contains(herr.Message, "exceeds the maximum allowed"),
+			"expected error message to contain 'exceeds the maximum allowed', got: %v", err)
+
+		tlog.Logfln("multipart upload correctly rejected when exceeding MaxChunkCount (%d)", core.MaxChunkCount)
+
+		// Cleanup: abort the upload
+		_ = api.AbortMultipartUpload(baseParams, bck, objName, uploadID)
+	})
+
+	t.Run("equal-to-limit", func(t *testing.T) {
+		var (
+			objName  = "test-multipart-at-limit"
+			numParts = core.MaxChunkCount // Exactly at the limit
+		)
+
+		uploadID, err := api.CreateMultipartUpload(baseParams, bck, objName)
+		tassert.CheckFatal(t, err)
+		tassert.Fatalf(t, uploadID != "", "upload ID should not be empty")
+
+		// Upload parts in parallel - all should succeed
+		err = uploadPartsInParallel(baseParams, objName, uploadID, numParts, bck, miniPartData)
+		tassert.CheckFatal(t, err)
+
+		// Complete multipart upload
+		partNumbers := make([]int, numParts)
+		for i := range numParts {
+			partNumbers[i] = i + 1
+		}
+		err = api.CompleteMultipartUpload(baseParams, bck, objName, uploadID, partNumbers)
+		tassert.CheckFatal(t, err)
+
+		tlog.Logfln("multipart upload completed successfully with %d parts at MaxChunkCount", numParts)
+
+		// Verify the uploaded object
+		hargs := api.HeadArgs{FltPresence: apc.FltPresent}
+		objAttrs, err := api.HeadObject(baseParams, bck, objName, hargs)
+		tassert.CheckFatal(t, err)
+
+		expectedSize := int64(numParts * len(miniPartData))
+		tassert.Fatalf(t, objAttrs.Size == expectedSize, "object size mismatch: expected %d, got %d", expectedSize, objAttrs.Size)
+
+		// GET the object and validate content
+		writer := bytes.NewBuffer(nil)
+		getArgs := api.GetArgs{Writer: writer}
+		_, err = api.GetObject(baseParams, bck, objName, &getArgs)
+		tassert.CheckFatal(t, err)
+
+		downloadedContent := writer.Bytes()
+		tassert.Errorf(t, len(downloadedContent) == numParts,
+			"content length mismatch: expected %d bytes, got %d", numParts, len(downloadedContent))
+
+		// Validate all bytes are 'x'
+		for i, b := range downloadedContent {
+			if b != 'x' {
+				t.Fatalf("byte at position %d is %q, expected 'x'", i, b)
+			}
+		}
+
+		tlog.Logfln("object content validated successfully: %d bytes, all 'x'", len(downloadedContent))
+	})
+}
+
+func uploadPartsInParallel(baseParams api.BaseParams, objName, uploadID string, numParts int, bck cmn.Bck, data []byte) error {
+	g := errgroup.Group{}
+	g.SetLimit(sys.MaxParallelism())
+
+	for partNum := 1; partNum <= numParts; partNum++ {
+		pn := partNum
+
+		g.Go(func() error {
+			putPartArgs := &api.PutPartArgs{
+				PutArgs: api.PutArgs{
+					BaseParams: baseParams,
+					Bck:        bck,
+					ObjName:    objName,
+					Reader:     readers.NewBytes(data),
+					Size:       uint64(len(data)),
+				},
+				UploadID:   uploadID,
+				PartNumber: pn,
+			}
+
+			return api.UploadPart(putPartArgs)
+		})
+	}
+
+	return g.Wait()
+}
+
 func TestMultipartUploadAbort(t *testing.T) {
 	var (
 		proxyURL   = tools.RandomProxyURL(t)
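
The limit exercised by the test above is core.MaxChunkCount, the maximum number of parts a single multipart upload may consist of (analogous to S3's part-count cap). As a rough illustration, and not part of this change, the sketch below shows one way a client might grow its part size so a large object still fits under that cap; the helper names and sample sizes are hypothetical, and it only assumes core.MaxChunkCount is an exported integer constant, as the test suggests.

// Illustrative sketch (not part of the diff above): picking a part size that
// respects core.MaxChunkCount. Helper names and sample sizes are hypothetical.
package main

import (
	"fmt"

	"github.com/NVIDIA/aistore/core"
)

// minPartSize returns the smallest part size (in bytes) that splits an object
// of objSize bytes into at most core.MaxChunkCount parts.
func minPartSize(objSize int64) int64 {
	return (objSize + int64(core.MaxChunkCount) - 1) / int64(core.MaxChunkCount)
}

// numPartsFor returns the number of parts a given part size produces.
func numPartsFor(objSize, partSize int64) int64 {
	return (objSize + partSize - 1) / partSize
}

func main() {
	var (
		objSize  = int64(50) << 30 // 50 GiB object, illustrative
		partSize = int64(1) << 20  // preferred 1 MiB parts
	)
	// If the preferred part size would exceed the part-count limit,
	// grow the part size to the minimum that still fits.
	if numPartsFor(objSize, partSize) > int64(core.MaxChunkCount) {
		partSize = minPartSize(objSize)
	}
	fmt.Printf("part size: %d B, parts: %d (limit: %d)\n",
		partSize, numPartsFor(objSize, partSize), core.MaxChunkCount)
}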