Replace some raw pointers with std::unique_ptr
This simplifies code paths and makes memory leaks less likely.  It
also makes memory ownership more explicit by requiring std::move.
This commit requires C++11.  References s3fs-fuse#2179.
gaul committed Jun 24, 2023
1 parent 7c9cf84 commit 5ac7502
Showing 16 changed files with 108 additions and 169 deletions.
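The whole commit applies one pattern: wrap each heap allocation in std::unique_ptr at the point of creation, hand ownership onward with std::move, and drop every error-path delete, since the destructor now frees the object on any exit. A minimal sketch of that pattern (type names are illustrative, not from s3fs; note that C++11 has no std::make_unique, hence the explicit new):

    #include <memory>
    #include <utility>
    #include <vector>

    struct Widget { int id; };

    int main()
    {
        std::vector<std::unique_ptr<Widget>> owners;

        std::unique_ptr<Widget> w(new Widget);  // owned from the moment of allocation
        w->id = 1;
        if(w->id < 0){
            return 1;                           // early exit: w is freed automatically
        }
        owners.push_back(std::move(w));         // ownership transfers into the container
        return 0;                               // the container frees its elements
    }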
2 changes: 1 addition & 1 deletion Makefile.am
@@ -34,7 +34,7 @@ release : dist ../utils/release.sh
cppcheck:
cppcheck --quiet --error-exitcode=1 \
--inline-suppr \
- --std=c++03 \
+ --std=c++11 \
--xml \
-D HAVE_ATTR_XATTR_H \
-D HAVE_SYS_EXTATTR_H \
34 changes: 10 additions & 24 deletions src/addhead.cpp
@@ -78,7 +78,6 @@ bool AdditionalHeader::Load(const char* file)

// read file
std::string line;
- ADDHEAD *paddhead;
while(getline(AH, line)){
if(line.empty()){
continue;
@@ -111,44 +110,41 @@ bool AdditionalHeader::Load(const char* file)
return false;
}

- paddhead = new ADDHEAD;
+ std::unique_ptr<ADDHEAD> paddhead(new ADDHEAD);
if(0 == strncasecmp(key.c_str(), ADD_HEAD_REGEX, strlen(ADD_HEAD_REGEX))){
// regex
if(key.size() <= strlen(ADD_HEAD_REGEX)){
S3FS_PRN_ERR("file format error: %s key(suffix) does not have key std::string.", key.c_str());
- delete paddhead;
continue;
}
key.erase(0, strlen(ADD_HEAD_REGEX));

// compile
- regex_t* preg = new regex_t;
+ std::unique_ptr<regex_t> preg(new regex_t);
int result;
- if(0 != (result = regcomp(preg, key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info
+ if(0 != (result = regcomp(preg.get(), key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info
char errbuf[256];
- regerror(result, preg, errbuf, sizeof(errbuf));
+ regerror(result, preg.get(), errbuf, sizeof(errbuf));
S3FS_PRN_ERR("failed to compile regex from %s key by %s.", key.c_str(), errbuf);
- delete preg;
- delete paddhead;
continue;
}

// set
- paddhead->pregex = preg;
+ paddhead->pregex = std::move(preg);
paddhead->basestring = key;
paddhead->headkey = head;
paddhead->headvalue = value;

}else{
// not regex, directly comparing
- paddhead->pregex = NULL;
+ paddhead->pregex.reset(nullptr);
paddhead->basestring = key;
paddhead->headkey = head;
paddhead->headvalue = value;
}

// add list
- addheadlist.push_back(paddhead);
+ addheadlist.push_back(std::move(paddhead));

// set flag
if(!is_enable){
@@ -162,16 +158,6 @@ void AdditionalHeader::Unload()
{
is_enable = false;

- for(addheadlist_t::iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
-     ADDHEAD *paddhead = *iter;
-     if(paddhead){
-         if(paddhead->pregex){
-             regfree(paddhead->pregex);
-             delete paddhead->pregex;
-         }
-         delete paddhead;
-     }
- }
addheadlist.clear();
}
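With the element type now std::unique_ptr<ADDHEAD>, clear() is enough: destroying each element runs ~add_header() (see addhead.h below), which in turn calls regfree(). A rough sketch of why the hand-written loop became unnecessary (illustrative types, not s3fs code):

    #include <memory>
    #include <vector>

    struct Node {
        ~Node() { /* release non-memory resources here, cf. ~add_header() */ }
    };

    int main()
    {
        std::vector<std::unique_ptr<Node>> v;
        v.push_back(std::unique_ptr<Node>(new Node));
        v.clear();  // each unique_ptr destroys its Node, running ~Node first
        return 0;
    }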

@@ -193,15 +179,15 @@ bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const
// Because to allow duplicate key, and then scanning the entire table.
//
for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
- const ADDHEAD *paddhead = *iter;
+ const ADDHEAD *paddhead = iter->get();
if(!paddhead){
continue;
}

if(paddhead->pregex){
// regex
regmatch_t match; // not use
- if(0 == regexec(paddhead->pregex, path, 1, &match, 0)){
+ if(0 == regexec(paddhead->pregex.get(), path, 1, &match, 0)){
// match -> adding header
meta[paddhead->headkey] = paddhead->headvalue;
}
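iter->get() hands out a raw, non-owning pointer for the duration of the scan; the vector keeps ownership, so nothing here may delete it. The same borrow-don't-own idiom in isolation (illustrative names, matching the file's iterator style):

    #include <memory>
    #include <vector>

    struct Entry { int key; };
    typedef std::vector<std::unique_ptr<Entry>> entrylist_t;

    int find_key(const entrylist_t& list, int key)
    {
        for(entrylist_t::const_iterator iter = list.begin(); iter != list.end(); ++iter){
            const Entry* e = iter->get();  // borrowed; the vector still owns it
            if(e && e->key == key){
                return key;
            }
        }
        return -1;
    }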
@@ -246,7 +232,7 @@ bool AdditionalHeader::Dump() const
ssdbg << "Additional Header list[" << addheadlist.size() << "] = {" << std::endl;

for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter, ++cnt){
- const ADDHEAD *paddhead = *iter;
+ const ADDHEAD *paddhead = iter->get();

ssdbg << " [" << cnt << "] = {" << std::endl;

11 changes: 9 additions & 2 deletions src/addhead.h
@@ -21,6 +21,7 @@
#ifndef S3FS_ADDHEAD_H_
#define S3FS_ADDHEAD_H_

+ #include <memory>
#include <regex.h>

#include "metaheader.h"
@@ -29,13 +30,19 @@
// Structure / Typedef
//----------------------------------------------
typedef struct add_header{
- regex_t* pregex; // not NULL means using regex, NULL means comparing suffix directly.
+ ~add_header() {
+     if(pregex){
+         regfree(pregex.get());
+     }
+ }
+
+ std::unique_ptr<regex_t> pregex; // not NULL means using regex, NULL means comparing suffix directly.
std::string basestring;
std::string headkey;
std::string headvalue;
}ADDHEAD;

- typedef std::vector<ADDHEAD *> addheadlist_t;
+ typedef std::vector<std::unique_ptr<ADDHEAD>> addheadlist_t;
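regex_t is not freed by delete alone; regcomp()'s internal allocations need regfree() first, which is why the commit adds ~add_header(). An alternative design would attach the cleanup to the pointer itself with a custom deleter — a sketch of that variant (not what this commit does):

    #include <memory>
    #include <regex.h>

    // Hypothetical deleter: regfree() then delete, wherever the pointer dies.
    struct regex_deleter {
        void operator()(regex_t* p) const {
            if(p){
                regfree(p);
                delete p;
            }
        }
    };
    typedef std::unique_ptr<regex_t, regex_deleter> regex_ptr;

The destructor approach keeps the member a plain std::unique_ptr<regex_t>; the deleter approach would make the regfree() impossible to forget at any future release site.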

//----------------------------------------------
// Class AdditionalHeader
47 changes: 16 additions & 31 deletions src/curl.cpp
@@ -22,6 +22,7 @@
#include <cstdlib>
#include <ctime>
#include <errno.h>
+ #include <memory>
#include <unistd.h>
#include <fstream>
#include <sstream>
@@ -1345,7 +1346,7 @@ int S3fsCurl::MapPutErrorResponse(int result)
// It is a factory method as utility because it requires an S3fsCurl object
// initialized for multipart upload from outside this class.
//
- S3fsCurl* S3fsCurl::CreateParallelS3fsCurl(const char* tpath, int fd, off_t start, off_t size, int part_num, bool is_copy, etagpair* petag, const std::string& upload_id, int& result)
+ std::unique_ptr<S3fsCurl> S3fsCurl::CreateParallelS3fsCurl(const char* tpath, int fd, off_t start, off_t size, int part_num, bool is_copy, etagpair* petag, const std::string& upload_id, int& result)
{
// duplicate fd
if(!tpath || -1 == fd || start < 0 || size <= 0 || !petag){
@@ -1355,7 +1356,7 @@ S3fsCurl* S3fsCurl::CreateParallelS3fsCurl(const char* tpath, int fd, off_t star
}
result = 0;

- S3fsCurl* s3fscurl = new S3fsCurl(true);
+ std::unique_ptr<S3fsCurl> s3fscurl(new S3fsCurl(true));

if(!is_copy){
s3fscurl->partdata.fd = fd;
@@ -1370,7 +1371,6 @@ S3fsCurl* S3fsCurl::CreateParallelS3fsCurl(const char* tpath, int fd, off_t star

if(0 != (result = s3fscurl->UploadMultipartPostSetup(tpath, part_num, upload_id))){
S3FS_PRN_ERR("failed uploading part setup(%d)", result);
- delete s3fscurl;
return NULL;
}
}else{
@@ -1392,16 +1392,14 @@ S3fsCurl* S3fsCurl::CreateParallelS3fsCurl(const char* tpath, int fd, off_t star

if(0 != (result = s3fscurl->CopyMultipartPostSetup(tpath, tpath, part_num, upload_id, meta))){
S3FS_PRN_ERR("failed uploading part setup(%d)", result);
- delete s3fscurl;
return NULL;
}
}

// Call lazy function
- if(!s3fscurl->fpLazySetup || !s3fscurl->fpLazySetup(s3fscurl)){
+ if(!s3fscurl->fpLazySetup || !s3fscurl->fpLazySetup(s3fscurl.get())){
S3FS_PRN_ERR("failed lazy function setup for uploading part");
result = -EIO;
- delete s3fscurl;
return NULL;
}
return s3fscurl;
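Returning std::unique_ptr<S3fsCurl> makes the factory contract explicit: the caller receives sole ownership, and every error path can just return NULL (which converts to an empty unique_ptr) with no delete. The shape of such a factory in miniature (names illustrative, not the real API):

    #include <memory>

    struct Conn {
        bool setup() { return true; }
    };

    std::unique_ptr<Conn> create_conn(int& result)
    {
        std::unique_ptr<Conn> c(new Conn);
        if(!c->setup()){
            result = -1;
            return NULL;  // empty unique_ptr; c is freed automatically
        }
        result = 0;
        return c;         // ownership moves to the caller
    }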
@@ -1438,7 +1436,7 @@ int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta,
off_t chunk = remaining_bytes > S3fsCurl::multipart_size ? S3fsCurl::multipart_size : remaining_bytes;

// s3fscurl sub object
- S3fsCurl* s3fscurl_para = new S3fsCurl(true);
+ std::unique_ptr<S3fsCurl> s3fscurl_para(new S3fsCurl(true));
s3fscurl_para->partdata.fd = fd;
s3fscurl_para->partdata.startpos = st.st_size - remaining_bytes;
s3fscurl_para->partdata.size = chunk;
@@ -1449,14 +1447,12 @@ int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta,
// initiate upload part for parallel
if(0 != (result = s3fscurl_para->UploadMultipartPostSetup(tpath, s3fscurl_para->partdata.get_part_number(), upload_id))){
S3FS_PRN_ERR("failed uploading part setup(%d)", result);
- delete s3fscurl_para;
return result;
}

// set into parallel object
- if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
+ if(!curlmulti.SetS3fsCurlObject(std::move(s3fscurl_para))){
S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath);
- delete s3fscurl_para;
return -EIO;
}
remaining_bytes -= chunk;
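Passing std::move(s3fscurl_para) turns SetS3fsCurlObject() into a sink: on success the multi-curl object owns the sub-request, so the old failure-path delete disappears. Assuming the method now takes std::unique_ptr by value (its signature is not shown in this diff), a sketch of the idiom:

    #include <memory>
    #include <utility>
    #include <vector>

    struct Job {};

    struct JobQueue {
        std::vector<std::unique_ptr<Job>> jobs;

        // Sink: takes ownership by value, like SetS3fsCurlObject after this change.
        bool add(std::unique_ptr<Job> job)
        {
            if(!job){
                return false;  // rejected; the (empty) parameter is just destroyed
            }
            jobs.push_back(std::move(job));
            return true;
        }
    };

Note the caller must not use the moved-from pointer afterwards; the failure branches here only log and return -EIO, which is safe.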
@@ -1517,8 +1513,7 @@ int S3fsCurl::ParallelMixMultipartUploadRequest(const char* tpath, headers_t& me
for(fdpage_list_t::const_iterator iter = mixuppages.begin(); iter != mixuppages.end(); ++iter){
if(iter->modified){
// Multipart upload
- S3fsCurl* s3fscurl_para = new S3fsCurl(true);
-
+ std::unique_ptr<S3fsCurl> s3fscurl_para(new S3fsCurl(true));
s3fscurl_para->partdata.fd = fd;
s3fscurl_para->partdata.startpos = iter->offset;
s3fscurl_para->partdata.size = iter->bytes;
Expand All @@ -1531,20 +1526,18 @@ int S3fsCurl::ParallelMixMultipartUploadRequest(const char* tpath, headers_t& me
// initiate upload part for parallel
if(0 != (result = s3fscurl_para->UploadMultipartPostSetup(tpath, s3fscurl_para->partdata.get_part_number(), upload_id))){
S3FS_PRN_ERR("failed uploading part setup(%d)", result);
- delete s3fscurl_para;
return result;
}

// set into parallel object
- if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
+ if(!curlmulti.SetS3fsCurlObject(std::move(s3fscurl_para))){
S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath);
- delete s3fscurl_para;
return -EIO;
}
}else{
// Multipart copy
for(off_t i = 0, bytes = 0; i < iter->bytes; i += bytes){
- S3fsCurl* s3fscurl_para = new S3fsCurl(true);
+ std::unique_ptr<S3fsCurl> s3fscurl_para(new S3fsCurl(true));

bytes = std::min(static_cast<off_t>(GetMultipartCopySize()), iter->bytes - i);
/* every part should be larger than MIN_MULTIPART_SIZE and smaller than FIVE_GB */
Expand All @@ -1571,14 +1564,12 @@ int S3fsCurl::ParallelMixMultipartUploadRequest(const char* tpath, headers_t& me
// initiate upload part for parallel
if(0 != (result = s3fscurl_para->CopyMultipartPostSetup(tpath, tpath, s3fscurl_para->partdata.get_part_number(), upload_id, meta))){
S3FS_PRN_ERR("failed uploading part setup(%d)", result);
- delete s3fscurl_para;
return result;
}

// set into parallel object
- if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
+ if(!curlmulti.SetS3fsCurlObject(std::move(s3fscurl_para))){
S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath);
- delete s3fscurl_para;
return -EIO;
}
}
@@ -1657,17 +1648,15 @@ int S3fsCurl::ParallelGetObjectRequest(const char* tpath, int fd, off_t start, o
chunk = remaining_bytes > S3fsCurl::multipart_size ? S3fsCurl::multipart_size : remaining_bytes;

// s3fscurl sub object
- S3fsCurl* s3fscurl_para = new S3fsCurl();
+ std::unique_ptr<S3fsCurl> s3fscurl_para(new S3fsCurl(true));
if(0 != (result = s3fscurl_para->PreGetObjectRequest(tpath, fd, (start + size - remaining_bytes), chunk, ssetype, ssevalue))){
S3FS_PRN_ERR("failed downloading part setup(%d)", result);
- delete s3fscurl_para;
return result;
}

// set into parallel object
- if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
+ if(!curlmulti.SetS3fsCurlObject(std::move(s3fscurl_para))){
S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath);
- delete s3fscurl_para;
return -EIO;
}
}
@@ -4359,22 +4348,20 @@ int S3fsCurl::MultipartHeadRequest(const char* tpath, off_t size, headers_t& met
meta["x-amz-copy-source-range"] = strrange.str();

// s3fscurl sub object
- S3fsCurl* s3fscurl_para = new S3fsCurl(true);
+ std::unique_ptr<S3fsCurl> s3fscurl_para(new S3fsCurl(true));
s3fscurl_para->b_from = SAFESTRPTR(tpath);
s3fscurl_para->b_meta = meta;
s3fscurl_para->partdata.add_etag_list(list);

// initiate upload part for parallel
if(0 != (result = s3fscurl_para->CopyMultipartPostSetup(tpath, tpath, s3fscurl_para->partdata.get_part_number(), upload_id, meta))){
S3FS_PRN_ERR("failed uploading part setup(%d)", result);
- delete s3fscurl_para;
return result;
}

// set into parallel object
- if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
+ if(!curlmulti.SetS3fsCurlObject(std::move(s3fscurl_para))){
S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath);
- delete s3fscurl_para;
return -EIO;
}
}
@@ -4456,22 +4443,20 @@ int S3fsCurl::MultipartRenameRequest(const char* from, const char* to, headers_t
meta["x-amz-copy-source-range"] = strrange.str();

// s3fscurl sub object
- S3fsCurl* s3fscurl_para = new S3fsCurl(true);
+ std::unique_ptr<S3fsCurl> s3fscurl_para(new S3fsCurl(true));
s3fscurl_para->b_from = SAFESTRPTR(from);
s3fscurl_para->b_meta = meta;
s3fscurl_para->partdata.add_etag_list(list);

// initiate upload part for parallel
if(0 != (result = s3fscurl_para->CopyMultipartPostSetup(from, to, s3fscurl_para->partdata.get_part_number(), upload_id, meta))){
S3FS_PRN_ERR("failed uploading part setup(%d)", result);
- delete s3fscurl_para;
return result;
}

// set into parallel object
- if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
+ if(!curlmulti.SetS3fsCurlObject(std::move(s3fscurl_para))){
S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", to);
- delete s3fscurl_para;
return -EIO;
}
}
3 changes: 2 additions & 1 deletion src/curl.h
@@ -24,6 +24,7 @@
#include <curl/curl.h>
#include <list>
#include <map>
+ #include <memory>
#include <vector>

#include "autolock.h"
@@ -268,7 +269,7 @@ class S3fsCurl
static bool InitCredentialObject(S3fsCred* pcredobj);
static bool InitMimeType(const std::string& strFile);
static bool DestroyS3fsCurl();
- static S3fsCurl* CreateParallelS3fsCurl(const char* tpath, int fd, off_t start, off_t size, int part_num, bool is_copy, etagpair* petag, const std::string& upload_id, int& result);
+ static std::unique_ptr<S3fsCurl> CreateParallelS3fsCurl(const char* tpath, int fd, off_t start, off_t size, int part_num, bool is_copy, etagpair* petag, const std::string& upload_id, int& result);
static int ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, int fd);
static int ParallelMixMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, const fdpage_list_t& mixuppages);
static int ParallelGetObjectRequest(const char* tpath, int fd, off_t start, off_t size);