Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix truncating existing files #4448

Merged
merged 12 commits into from Jan 11, 2024
2 changes: 1 addition & 1 deletion .drone.star
Expand Up @@ -5,7 +5,7 @@ OSIXIA_OPEN_LDAP = "osixia/openldap:1.3.0"
REDIS = "redis:6-alpine"
OC_CI_PHP = "cs3org/behat:latest"
OC_LITMUS = "owncloud/litmus:latest"
OC_CS3_API_VALIDATOR = "owncloud/cs3api-validator:0.2.0"
OC_CS3_API_VALIDATOR = "owncloud/cs3api-validator:0.2.1"
OC_CI_BAZEL_BUILDIFIER = "owncloudci/bazel-buildifier:latest"

# Shared step definitions
Expand Down
5 changes: 5 additions & 0 deletions changelog/unreleased/fix-truncating-files.md
@@ -0,0 +1,5 @@
Bugfix: Fix truncating existing files

We fixed a problem where existing files kept their content when being overwritten by a 0-byte file.

https://github.com/cs3org/reva/pull/4448
88 changes: 44 additions & 44 deletions internal/http/services/owncloud/ocdav/put.go
Expand Up @@ -292,58 +292,58 @@ func (s *svc) handlePut(ctx context.Context, w http.ResponseWriter, r *http.Requ
}

// only send an actual PUT request if the file has bytes. Otherwise the initiate file upload request creates the file
// if length != 0 { // FIXME bring back 0 byte file upload handling, see https://github.com/owncloud/ocis/issues/2609

var ep, token string
for _, p := range uRes.Protocols {
if p.Protocol == "simple" {
ep, token = p.UploadEndpoint, p.Token
if length != 0 {
var ep, token string
for _, p := range uRes.Protocols {
if p.Protocol == "simple" {
ep, token = p.UploadEndpoint, p.Token
}
}
}

httpReq, err := rhttp.NewRequest(ctx, http.MethodPut, ep, r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return
}
Propagator.Inject(ctx, propagation.HeaderCarrier(httpReq.Header))
httpReq.Header.Set(datagateway.TokenTransportHeader, token)
httpReq, err := rhttp.NewRequest(ctx, http.MethodPut, ep, r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return
}
Propagator.Inject(ctx, propagation.HeaderCarrier(httpReq.Header))
httpReq.Header.Set(datagateway.TokenTransportHeader, token)

httpRes, err := s.client.Do(httpReq)
if err != nil {
log.Error().Err(err).Msg("error doing PUT request to data service")
w.WriteHeader(http.StatusInternalServerError)
return
}
defer httpRes.Body.Close()
if httpRes.StatusCode != http.StatusOK {
if httpRes.StatusCode == http.StatusPartialContent {
w.WriteHeader(http.StatusPartialContent)
httpRes, err := s.client.Do(httpReq)
if err != nil {
log.Error().Err(err).Msg("error doing PUT request to data service")
w.WriteHeader(http.StatusInternalServerError)
return
}
if httpRes.StatusCode == errtypes.StatusChecksumMismatch {
w.WriteHeader(http.StatusBadRequest)
b, err := errors.Marshal(http.StatusBadRequest, "The computed checksum does not match the one received from the client.", "")
errors.HandleWebdavError(&log, w, b, err)
defer httpRes.Body.Close()
if httpRes.StatusCode != http.StatusOK {
if httpRes.StatusCode == http.StatusPartialContent {
w.WriteHeader(http.StatusPartialContent)
return
}
if httpRes.StatusCode == errtypes.StatusChecksumMismatch {
w.WriteHeader(http.StatusBadRequest)
b, err := errors.Marshal(http.StatusBadRequest, "The computed checksum does not match the one received from the client.", "")
errors.HandleWebdavError(&log, w, b, err)
return
}
log.Error().Err(err).Msg("PUT request to data server failed")
w.WriteHeader(httpRes.StatusCode)
return
}
log.Error().Err(err).Msg("PUT request to data server failed")
w.WriteHeader(httpRes.StatusCode)
return
}

// copy headers if they are present
if httpRes.Header.Get(net.HeaderETag) != "" {
w.Header().Set(net.HeaderETag, httpRes.Header.Get(net.HeaderETag))
}
if httpRes.Header.Get(net.HeaderOCETag) != "" {
w.Header().Set(net.HeaderOCETag, httpRes.Header.Get(net.HeaderOCETag))
}
if httpRes.Header.Get(net.HeaderOCFileID) != "" {
w.Header().Set(net.HeaderOCFileID, httpRes.Header.Get(net.HeaderOCFileID))
}
if httpRes.Header.Get(net.HeaderLastModified) != "" {
w.Header().Set(net.HeaderLastModified, httpRes.Header.Get(net.HeaderLastModified))
// copy headers if they are present
if httpRes.Header.Get(net.HeaderETag) != "" {
w.Header().Set(net.HeaderETag, httpRes.Header.Get(net.HeaderETag))
}
if httpRes.Header.Get(net.HeaderOCETag) != "" {
w.Header().Set(net.HeaderOCETag, httpRes.Header.Get(net.HeaderOCETag))
}
if httpRes.Header.Get(net.HeaderOCFileID) != "" {
w.Header().Set(net.HeaderOCFileID, httpRes.Header.Get(net.HeaderOCFileID))
}
if httpRes.Header.Get(net.HeaderLastModified) != "" {
w.Header().Set(net.HeaderLastModified, httpRes.Header.Get(net.HeaderLastModified))
}
}

// file was new
Expand Down
87 changes: 42 additions & 45 deletions internal/http/services/owncloud/ocdav/tus.go
Expand Up @@ -252,56 +252,58 @@ func (s *svc) handleTusPost(ctx context.Context, w http.ResponseWriter, r *http.
// for creation-with-upload extension forward bytes to dataprovider
// TODO check this really streams
if r.Header.Get(net.HeaderContentType) == "application/offset+octet-stream" {
length, err := strconv.ParseInt(r.Header.Get(net.HeaderContentLength), 10, 64)
if err != nil {
log.Debug().Err(err).Msg("wrong request")
w.WriteHeader(http.StatusBadRequest)
return
}
var httpRes *http.Response
finishUpload := true
if uploadLength > 0 {
var httpRes *http.Response

httpReq, err := rhttp.NewRequest(ctx, http.MethodPatch, ep, r.Body)
if err != nil {
log.Debug().Err(err).Msg("wrong request")
w.WriteHeader(http.StatusInternalServerError)
return
}
Propagator.Inject(ctx, propagation.HeaderCarrier(httpReq.Header))

httpReq.Header.Set(net.HeaderContentType, r.Header.Get(net.HeaderContentType))
httpReq.Header.Set(net.HeaderContentLength, r.Header.Get(net.HeaderContentLength))
if r.Header.Get(net.HeaderUploadOffset) != "" {
httpReq.Header.Set(net.HeaderUploadOffset, r.Header.Get(net.HeaderUploadOffset))
} else {
httpReq.Header.Set(net.HeaderUploadOffset, "0")
}
httpReq.Header.Set(net.HeaderTusResumable, r.Header.Get(net.HeaderTusResumable))
httpReq, err := rhttp.NewRequest(ctx, http.MethodPatch, ep, r.Body)
if err != nil {
log.Debug().Err(err).Msg("wrong request")
w.WriteHeader(http.StatusInternalServerError)
return
}
Propagator.Inject(ctx, propagation.HeaderCarrier(httpReq.Header))

httpReq.Header.Set(net.HeaderContentType, r.Header.Get(net.HeaderContentType))
httpReq.Header.Set(net.HeaderContentLength, r.Header.Get(net.HeaderContentLength))
if r.Header.Get(net.HeaderUploadOffset) != "" {
httpReq.Header.Set(net.HeaderUploadOffset, r.Header.Get(net.HeaderUploadOffset))
} else {
httpReq.Header.Set(net.HeaderUploadOffset, "0")
}
httpReq.Header.Set(net.HeaderTusResumable, r.Header.Get(net.HeaderTusResumable))

httpRes, err = s.client.Do(httpReq)
if err != nil {
log.Error().Err(err).Msg("error doing PATCH request to data gateway")
w.WriteHeader(http.StatusInternalServerError)
return
}
defer httpRes.Body.Close()
httpRes, err = s.client.Do(httpReq)
if err != nil || httpRes == nil {
log.Error().Err(err).Msg("error doing PATCH request to data gateway")
w.WriteHeader(http.StatusInternalServerError)
return
}
defer httpRes.Body.Close()

w.Header().Set(net.HeaderUploadOffset, httpRes.Header.Get(net.HeaderUploadOffset))
w.Header().Set(net.HeaderTusResumable, httpRes.Header.Get(net.HeaderTusResumable))
w.Header().Set(net.HeaderTusUploadExpires, httpRes.Header.Get(net.HeaderTusUploadExpires))
if httpRes.StatusCode != http.StatusNoContent {
w.WriteHeader(httpRes.StatusCode)
return
}
if httpRes.StatusCode != http.StatusNoContent {
w.WriteHeader(httpRes.StatusCode)
return
}

// check if upload was fully completed
if length == 0 || httpRes.Header.Get(net.HeaderUploadOffset) == r.Header.Get(net.HeaderUploadLength) {
// get uploaded file metadata
w.Header().Set(net.HeaderUploadOffset, httpRes.Header.Get(net.HeaderUploadOffset))
w.Header().Set(net.HeaderTusResumable, httpRes.Header.Get(net.HeaderTusResumable))
w.Header().Set(net.HeaderTusUploadExpires, httpRes.Header.Get(net.HeaderTusUploadExpires))
if httpRes.Header.Get(net.HeaderOCMtime) != "" {
w.Header().Set(net.HeaderOCMtime, httpRes.Header.Get(net.HeaderOCMtime))
}

if resid, err := storagespace.ParseID(httpRes.Header.Get(net.HeaderOCFileID)); err == nil {
sReq.Ref = &provider.Reference{
ResourceId: &resid,
}
}
finishUpload = httpRes.Header.Get(net.HeaderUploadOffset) == r.Header.Get(net.HeaderUploadLength)
}

// check if upload was fully completed
if uploadLength == 0 || finishUpload {
// get uploaded file metadata

sRes, err := client.Stat(ctx, sReq)
if err != nil {
Expand All @@ -311,7 +313,6 @@ func (s *svc) handleTusPost(ctx context.Context, w http.ResponseWriter, r *http.
}

if sRes.Status.Code != rpc.Code_CODE_OK && sRes.Status.Code != rpc.Code_CODE_NOT_FOUND {

if sRes.Status.Code == rpc.Code_CODE_PERMISSION_DENIED {
// the token expired during upload, so the stat failed
// and we can't do anything about it.
Expand All @@ -330,10 +331,6 @@ func (s *svc) handleTusPost(ctx context.Context, w http.ResponseWriter, r *http.
w.WriteHeader(http.StatusInternalServerError)
return
}
if httpRes != nil && httpRes.Header != nil && httpRes.Header.Get(net.HeaderOCMtime) != "" {
// set the "accepted" value if returned in the upload response headers
w.Header().Set(net.HeaderOCMtime, httpRes.Header.Get(net.HeaderOCMtime))
}

// get WebDav permissions for file
isPublic := false
Expand Down
3 changes: 2 additions & 1 deletion pkg/storage/fs/nextcloud/nextcloud_server_mock.go
Expand Up @@ -113,7 +113,8 @@ var responses = map[string]Response{

`POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/GetMD {"ref":{"path":"/file"},"mdKeys":null}`: {404, ``, serverStateEmpty},
`POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/InitiateUpload {"ref":{"path":"/file"},"uploadLength":0,"metadata":{"providerID":""}}`: {200, `{"simple": "yes","tus": "yes"}`, serverStateEmpty},
`POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/InitiateUpload {"ref":{"resource_id":{"storage_id":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c"},"path":"/versionedFile"},"uploadLength":0,"metadata":{}}`: {200, `{"simple": "yes","tus": "yes"}`, serverStateEmpty},
`POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/InitiateUpload {"ref":{"resource_id":{"storage_id":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c"},"path":"/versionedFile"},"uploadLength":1,"metadata":{}}`: {200, `{"simple": "yes","tus": "yes"}`, serverStateEmpty},
`POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/InitiateUpload {"ref":{"resource_id":{"storage_id":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c"},"path":"/versionedFile"},"uploadLength":2,"metadata":{}}`: {200, `{"simple": "yes","tus": "yes"}`, serverStateEmpty},

`POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/GetMD {"ref":{"path":"/yes"},"mdKeys":[]}`: {200, `{"opaque":{},"type":1,"id":{"opaque_id":"fileid-/yes"},"checksum":{},"etag":"deadbeef","mime_type":"text/plain","mtime":{"seconds":1234567890},"path":"/yes","permission_set":{},"size":1,"canonical_metadata":{},"arbitrary_metadata":{}}`, serverStateEmpty},

Expand Down
8 changes: 8 additions & 0 deletions pkg/storage/utils/decomposedfs/upload.go
Expand Up @@ -299,6 +299,14 @@ func (fs *Decomposedfs) InitiateUpload(ctx context.Context, ref *provider.Refere

metrics.UploadSessionsInitiated.Inc()

if uploadLength == 0 {
// Directly finish this upload
err = session.FinishUpload(ctx)
if err != nil {
return nil, err
}
}

return map[string]string{
"simple": session.ID(),
"tus": session.ID(),
Expand Down
47 changes: 14 additions & 33 deletions pkg/storage/utils/decomposedfs/upload_test.go
Expand Up @@ -230,6 +230,8 @@ var _ = Describe("File uploads", func() {

When("the user initiates a zero byte file upload", func() {
It("succeeds", func() {
bs.On("Upload", mock.AnythingOfType("*node.Node"), mock.AnythingOfType("string"), mock.Anything).
Return(nil)
uploadIds, err := fs.InitiateUpload(ctx, ref, 0, map[string]string{})

Expect(err).ToNot(HaveOccurred())
Expand All @@ -239,58 +241,37 @@ var _ = Describe("File uploads", func() {

resources, err := fs.ListFolder(ctx, rootRef, []string{}, []string{})
Expect(err).ToNot(HaveOccurred())
Expect(len(resources)).To(Equal(0))
Expect(len(resources)).To(Equal(1))
})
})

When("the user uploads a non zero byte file", func() {
It("succeeds", func() {
var (
fileContent = []byte("0123456789")
)

uploadIds, err := fs.InitiateUpload(ctx, ref, 10, map[string]string{})
It("fails when trying to upload empty data. 0-byte uploads are finished during initialization already", func() {
bs.On("Upload", mock.AnythingOfType("*node.Node"), mock.AnythingOfType("string"), mock.Anything).
Return(nil)
uploadIds, err := fs.InitiateUpload(ctx, ref, 0, map[string]string{})

Expect(err).ToNot(HaveOccurred())
Expect(len(uploadIds)).To(Equal(2))
Expect(uploadIds["simple"]).ToNot(BeEmpty())
Expect(uploadIds["tus"]).ToNot(BeEmpty())

uploadRef := &provider.Reference{Path: "/" + uploadIds["simple"]}

bs.On("Upload", mock.AnythingOfType("*node.Node"), mock.AnythingOfType("string"), mock.Anything).
Return(nil).
Run(func(args mock.Arguments) {
data, err := os.ReadFile(args.Get(1).(string))

Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal([]byte("0123456789")))
})

_, err = fs.Upload(ctx, storage.UploadRequest{
Ref: uploadRef,
Body: io.NopCloser(bytes.NewReader(fileContent)),
Length: int64(len(fileContent)),
Body: io.NopCloser(bytes.NewReader([]byte(""))),
Length: 0,
}, nil)

Expect(err).ToNot(HaveOccurred())
bs.AssertCalled(GinkgoT(), "Upload", mock.Anything, mock.Anything, mock.Anything)

resources, err := fs.ListFolder(ctx, rootRef, []string{}, []string{})

Expect(err).ToNot(HaveOccurred())
Expect(len(resources)).To(Equal(1))
Expect(resources[0].Path).To(Equal(ref.Path))
Expect(err).To(HaveOccurred())
})
})

When("the user uploads a zero byte file", func() {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

should we add a negative test to document that clients are not supposed to send a subsequent request after initiating a 0-byte file upload?

When("the user uploads a non zero byte file", func() {
It("succeeds", func() {
var (
fileContent = []byte("")
fileContent = []byte("0123456789")
)

uploadIds, err := fs.InitiateUpload(ctx, ref, 0, map[string]string{})
uploadIds, err := fs.InitiateUpload(ctx, ref, 10, map[string]string{})

Expect(err).ToNot(HaveOccurred())
Expect(len(uploadIds)).To(Equal(2))
Expand All @@ -305,7 +286,7 @@ var _ = Describe("File uploads", func() {
data, err := os.ReadFile(args.Get(1).(string))

Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal([]byte("")))
Expect(data).To(Equal([]byte("0123456789")))
})

_, err = fs.Upload(ctx, storage.UploadRequest{
Expand Down
2 changes: 2 additions & 0 deletions pkg/storage/utils/metadata/cs3.go
Expand Up @@ -181,6 +181,8 @@ func (cs3 *CS3) Upload(ctx context.Context, req UploadRequest) (*UploadResponse,
ifuReq.Opaque = utils.AppendPlainToOpaque(ifuReq.Opaque, "X-OC-Mtime", strconv.Itoa(int(req.MTime.Unix()))+"."+strconv.Itoa(req.MTime.Nanosecond()))
}

ifuReq.Opaque = utils.AppendPlainToOpaque(ifuReq.Opaque, net.HeaderUploadLength, strconv.FormatInt(int64(len(req.Content)), 10))

res, err := client.InitiateFileUpload(ctx, ifuReq)
if err != nil {
return nil, err
Expand Down