From 16edb35b189b6f7e0313d9bd1e4a37e75c31ce48 Mon Sep 17 00:00:00 2001 From: FangQianan Date: Wed, 25 Dec 2024 14:18:39 +0800 Subject: [PATCH] release 1.91.5 --- ChangeLog | 12 + configure.ac | 2 +- doc/man/ossfs.1.in | 10 +- scripts/ossfs-coverage-centos7.sh | 4 + src/fdcache.cpp | 17 ++ src/fdcache.h | 2 + src/s3fs.cpp | 136 +++++---- src/s3fs_global.cpp | 2 +- src/s3fs_help.cpp | 13 +- src/test_curl_util.cpp | 54 ++++ src/test_string_util.cpp | 100 ++++++- src/test_util.h | 10 +- test/integration-test-main.sh | 279 ++++++++++++++++++ test/small-integration-test.sh | 15 +- test/test-utils.sh | 8 + test/test_policies/policy.json | 17 ++ .../test_policies/policy_noaccess_prefix.json | 60 ++++ test/test_policy_mount.sh | 236 +++++++++++++++ 18 files changed, 896 insertions(+), 81 deletions(-) create mode 100644 test/test_policies/policy.json create mode 100644 test/test_policies/policy_noaccess_prefix.json create mode 100644 test/test_policy_mount.sh diff --git a/ChangeLog b/ChangeLog index 425eec3..565e179 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,17 @@ ChangeLog for OSSFS ------------------ +## v1.91.5 (25/12/2024) + - Fixed some bugs that users get unexpected results on mounting with specific policies for the bucket/prefix set. + - Fixed a bug that ossfs take a file as existing if HeadObj returns 403. + - Change the default value of complement_stat to true. + - Support free_space_ratio. + +## v1.91.4 (24/09/2024) + - Add a new option sigv4 to support OSS V4 signature. + - Fixed a bug that concurrent writing may cause deadlock when disk space is insufficient. + - Support authentication via dynamic lib. + - Fixed a bug that AK and SK are stored in wrong environment variables. + ## v1.91.3 (04/06/2024) - Fixed a bug that IO fails when running out of disk space. - Fixed a bug that default_acl option does not working. 
diff --git a/configure.ac b/configure.ac index 37c4ac2..40a69c5 100644 --- a/configure.ac +++ b/configure.ac @@ -20,7 +20,7 @@ dnl Process this file with autoconf to produce a configure script. AC_PREREQ(2.59) -AC_INIT(ossfs, 1.91.4) +AC_INIT(ossfs, 1.91.5) AC_CONFIG_HEADER([config.h]) AC_CANONICAL_SYSTEM diff --git a/doc/man/ossfs.1.in b/doc/man/ossfs.1.in index ad77253..1cfb140 100644 --- a/doc/man/ossfs.1.in +++ b/doc/man/ossfs.1.in @@ -187,6 +187,12 @@ sets MB to ensure disk free space. This option means the threshold of free space ossfs makes file for downloading, uploading and caching files. If the disk free space is smaller than this value, ossfs do not use disk space as possible in exchange for the performance. .TP +\fB\-o\fR free_space_ratio (default="0") +sets min free space ratio of the disk. The value of this option can be between 0 and 100. It will control +the size of the cache according to this ratio to ensure that the idle ratio of the disk is greater than this value. +For example, when the disk space is 50GB, the 10% value will +ensure that the disk will reserve at least 50GB * 10% = 5GB of remaining space. +.TP \fB\-o\fR multipart_threshold (default="25") threshold, in MB, to use multipart upload instead of single-part. Must be at least 5 MB. @@ -291,9 +297,9 @@ https://curl.haxx.se/docs/ssl-ciphers.html The instance name of the current ossfs mountpoint. This name will be added to logging messages and user agent headers sent by ossfs. .TP -\fB\-o\fR complement_stat (complement lack of file/directory mode) +\fB\-o\fR complement_stat (complement lack of file/directory mode, enabled by default) ossfs complements lack of information about file/directory mode if a file or a directory object does not have x-oss-meta-mode header. -As default, ossfs does not complements stat information for a object, then the object will not be able to be allowed to list/modify. 
+If disabled, ossfs does not complements stat information for a object, then the object will not be able to be allowed to list/modify. .TP \fB\-o\fR notsup_compat_dir (disable support of alternative directory names) .RS diff --git a/scripts/ossfs-coverage-centos7.sh b/scripts/ossfs-coverage-centos7.sh index f4a1d8b..cc52987 100644 --- a/scripts/ossfs-coverage-centos7.sh +++ b/scripts/ossfs-coverage-centos7.sh @@ -34,6 +34,10 @@ rm -rf ${OSSFS_SOURCE_DIR}/coverage_html && mkdir ${OSSFS_SOURCE_DIR}/coverage_h DBGLEVEL=debug ALL_TESTS=1 OSSFS_CREDENTIALS_FILE=/root/.passwd-ossfs TEST_BUCKET_1=${BUCKET} S3PROXY_BINARY="" OSS_URL=${URL} ./small-integration-test.sh +${OSSFS_SOURCE_DIR}/src/test_page_list +${OSSFS_SOURCE_DIR}/src/test_curl_util +${OSSFS_SOURCE_DIR}/src/test_string_util + gcovr -r ${OSSFS_SOURCE_DIR}/src --html-details -o ${OSSFS_SOURCE_DIR}/coverage_html/coverage.html diff --git a/src/fdcache.cpp b/src/fdcache.cpp index fea2046..d672118 100644 --- a/src/fdcache.cpp +++ b/src/fdcache.cpp @@ -272,6 +272,11 @@ bool FdManager::InitFakeUsedDiskSize(off_t fake_freesize) return true; } +off_t FdManager::GetTotalDiskSpaceByRatio(int ratio) +{ + return FdManager::GetTotalDiskSpace(nullptr) * ratio / 100; +} + off_t FdManager::GetTotalDiskSpace(const char* path) { struct statvfs vfsbuf; @@ -328,6 +333,18 @@ bool FdManager::IsSafeDiskSpace(const char* path, off_t size) return size + FdManager::GetEnsureFreeDiskSpace() <= fsize; } +bool FdManager::IsSafeDiskSpaceWithLog(const char* path, off_t size) +{ + off_t fsize = FdManager::GetFreeDiskSpace(path); + off_t needsize = size + FdManager::GetEnsureFreeDiskSpace(); + if(needsize <= fsize){ + return true; + } else { + S3FS_PRN_EXIT("There is no enough disk space for used as cache(or temporary) directory by ossfs. 
Requires %.3f MB, already has %.3f MB.", static_cast<double>(needsize) / 1024 / 1024, static_cast<double>(fsize) / 1024 / 1024); + return false; + } +} + bool FdManager::HaveLseekHole() { if(FdManager::checked_lseek){ diff --git a/src/fdcache.h b/src/fdcache.h index ed5ea63..a0b90ad 100644 --- a/src/fdcache.h +++ b/src/fdcache.h @@ -84,12 +84,14 @@ class FdManager static off_t SetEnsureFreeDiskSpace(off_t size); static bool InitFakeUsedDiskSize(off_t fake_freesize); static bool IsSafeDiskSpace(const char* path, off_t size); + static bool IsSafeDiskSpaceWithLog(const char* path, off_t size); static void FreeReservedDiskSpace(off_t size); static bool ReserveDiskSpace(off_t size); static bool HaveLseekHole(); static bool SetTmpDir(const char* dir); static bool CheckTmpDirExist(); static FILE* MakeTempFile(); + static off_t GetTotalDiskSpaceByRatio(int ratio); // Return FdEntity associated with path, returning NULL on error. This operation increments the reference count; callers must decrement via Close after use. 
FdEntity* GetFdEntity(const char* path, int& existfd, bool newfd = true, bool lock_already_held = false); diff --git a/src/s3fs.cpp b/src/s3fs.cpp index 14db725..5d9d7eb 100644 --- a/src/s3fs.cpp +++ b/src/s3fs.cpp @@ -131,7 +131,6 @@ static int rename_object(const char* from, const char* to, bool update_ctime); static int rename_object_nocopy(const char* from, const char* to, bool update_ctime); static int clone_directory_object(const char* from, const char* to, bool update_ctime); static int rename_directory(const char* from, const char* to); -static int remote_mountpath_exists(const char* path); static void free_xattrs(xattrs_t& xattrs); static bool parse_xattr_keyval(const std::string& xattrpair, std::string& key, PXATTRVAL& pval); static size_t parse_xattrs(const std::string& strxattrs, xattrs_t& xattrs); @@ -396,26 +395,7 @@ static int get_object_attribute(const char* path, struct stat* pstbuf, headers_t s3fscurl.DestroyCurlHandle(); // if not found target path object, do over checking - if(-EPERM == result){ - // [NOTE] - // In case of a permission error, it exists in directory - // file list but inaccessible. So there is a problem that - // it will send a HEAD request every time, because it is - // not registered in the Stats cache. - // Therefore, even if the file has a permission error, it - // should be registered in the Stats cache. However, if - // the response without modifying is registered in the - // cache, the file permission will be 0644(umask dependent) - // because the meta header does not exist. - // Thus, set the mode of 0000 here in the meta header so - // that ossfs can print a permission error when the file - // is actually accessed. - // It is better not to set meta header other than mode, - // so do not do it. - // - (*pheader)["x-oss-meta-mode"] = str(0); - - }else if(0 != result){ + if(0 != result){ if(overcheck){ // when support_compat_dir is disabled, strpath maybe have "_$folder$". 
if('/' != *strpath.rbegin() && std::string::npos == strpath.find("_$folder$", 0)){ @@ -467,7 +447,7 @@ static int get_object_attribute(const char* path, struct stat* pstbuf, headers_t // If the file is listed but not allowed access, put it in // the positive cache instead of the negative cache. // - if(0 != result && -EPERM != result){ + if(0 != result){ // finally, "path" object did not find. Add no object cache. strpath = path; // reset original StatCache::getStatCacheData()->AddNoObjectCache(strpath); @@ -3031,24 +3011,6 @@ static int list_bucket(const char* path, S3ObjList& head, const char* delimiter, return 0; } -static int remote_mountpath_exists(const char* path) -{ - struct stat stbuf; - int result; - - S3FS_PRN_INFO1("[path=%s]", path); - - // getattr will prefix the path with the remote mountpoint - if(0 != (result = get_object_attribute("/", &stbuf, NULL))){ - return result; - } - if(!S_ISDIR(stbuf.st_mode)){ - return -ENOTDIR; - } - return 0; -} - - static void free_xattrs(xattrs_t& xattrs) { for(xattrs_t::iterator iter = xattrs.begin(); iter != xattrs.end(); ++iter){ @@ -3777,7 +3739,7 @@ static int s3fs_check_service() S3fsCurl s3fscurl; int res; - if(0 > (res = s3fscurl.CheckBucket("/"))){ + if(0 > (res = s3fscurl.CheckBucket(get_realpath("/").c_str()))){ // get response code long responseCode = s3fscurl.GetLastResponseCode(); @@ -3828,7 +3790,9 @@ static int s3fs_check_service() } else { s3host = "http://" + expecthost; } - //extract region from host for sigv4 + // extract region from host for sigv4 + // The region of the government cloud/financial cloud may not be derived from the domain name + // https://help.aliyun.com/zh/oss/user-guide/regions-and-endpoints?spm=a2c4g.11186623.0.0.13ad12c1N7PoaV if(!strncasecmp(expecthost.c_str(), "oss-", 4)){ std::size_t found; if ((found = expecthost.find_first_of(".")) != std::string::npos) { @@ -3838,18 +3802,11 @@ static int s3fs_check_service() } // retry to check with new host 
s3fscurl.DestroyCurlHandle(); - res = s3fscurl.CheckBucket("/"); + res = s3fscurl.CheckBucket(get_realpath("/").c_str()); responseCode = s3fscurl.GetLastResponseCode(); } } - // retry to check with mount prefix - if(300 <= responseCode && responseCode < 500 && !mount_prefix.empty()){ - s3fscurl.DestroyCurlHandle(); - res = s3fscurl.CheckBucket(get_realpath("/").c_str()); - responseCode = s3fscurl.GetLastResponseCode(); - } - // try signature v2 /* if(0 > res && (responseCode == 400 || responseCode == 403) && S3fsCurl::GetSignatureType() == V1_OR_V4){ @@ -3864,35 +3821,39 @@ static int s3fs_check_service() } */ // check errors(after retrying) + // [NOTE] + // When mounting a bucket, an error code is returned and the mount fails. + // However, when mounting a prefix, success should be returned if the prefix does not exist. + // if(0 > res && responseCode != 200 && responseCode != 301){ // parse error message if existed std::string errMessage; const std::string* body = s3fscurl.GetBodyData(); check_error_message(body->c_str(), body->size(), errMessage); - + bool is_failure = true; if(responseCode == 400){ S3FS_PRN_CRIT("Failed to check bucket and directory for mount point : Bad Request(host=%s, message=%s)", s3host.c_str(), errMessage.c_str()); }else if(responseCode == 403){ S3FS_PRN_CRIT("Failed to check bucket and directory for mount point : Invalid Credentials(host=%s, message=%s)", s3host.c_str(), errMessage.c_str()); - }else if(responseCode == 404){ - S3FS_PRN_CRIT("Failed to check bucket and directory for mount point : Bucket or directory not found(host=%s, message=%s)", s3host.c_str(), errMessage.c_str()); + }else if (responseCode == 404) { + std::string value; + if(simple_parse_xml(body->c_str(), body->size(), "Code", value)) { + if(value == "NoSuchBucket") { + S3FS_PRN_CRIT("Failed to check bucket : Bucket not found(host=%s, message=%s)", s3host.c_str(), errMessage.c_str()); + } else { + is_failure = false; + } + } }else{ S3FS_PRN_CRIT("Failed to check bucket 
and directory for mount point : Unable to connect(host=%s, message=%s)", s3host.c_str(), errMessage.c_str()); } - return EXIT_FAILURE; - } - } - s3fscurl.DestroyCurlHandle(); - - // make sure remote mountpath exists and is a directory - if(!mount_prefix.empty()){ - if(remote_mountpath_exists(mount_prefix.c_str()) != 0){ - S3FS_PRN_CRIT("remote mountpath %s not found.", mount_prefix.c_str()); - return EXIT_FAILURE; + if (is_failure) { + return EXIT_FAILURE; + } } } S3FS_MALLOCTRIM(0); - + return EXIT_SUCCESS; } @@ -4407,8 +4368,34 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar max_dirty_data = size; return 0; } + if(is_prefix(arg, "free_space_ratio=")){ + int ratio = static_cast<int>(cvt_strtoofft(strchr(arg, '=') + sizeof(char), /*base=*/ 10)); + if(FdManager::GetEnsureFreeDiskSpace()!=0){ + S3FS_PRN_EXIT("option free_space_ratio conflicts with ensure_diskfree, please set only one of them."); + return -1; + } + if(ratio < 0 || ratio > 100){ + S3FS_PRN_EXIT("option free_space_ratio must between 0 to 100, which is: %d", ratio); + return -1; + } + off_t dfsize = FdManager::GetTotalDiskSpaceByRatio(ratio); + S3FS_PRN_INFO("Free space ratio set to %d %%, ensure the available disk space is greater than %.3f MB", ratio, static_cast<double>(dfsize) / 1024 / 1024); + if(dfsize < S3fsCurl::GetMultipartSize()){ + S3FS_PRN_WARN("specified size to ensure disk free space is smaller than multipart size, so set multipart size to it."); + dfsize = S3fsCurl::GetMultipartSize(); + } + FdManager::SetEnsureFreeDiskSpace(dfsize); + return 0; + } if(is_prefix(arg, "ensure_diskfree=")){ off_t dfsize = cvt_strtoofft(strchr(arg, '=') + sizeof(char), /*base=*/ 10) * 1024 * 1024; + + if(FdManager::GetEnsureFreeDiskSpace()!=0){ + S3FS_PRN_EXIT("option free_space_ratio conflicts with ensure_diskfree, please set only one of them."); + return -1; + } + S3FS_PRN_INFO("Set and ensure the available disk space is greater than %.3f MB.", static_cast<double>(dfsize) / 1024 / 1024); + 
if(dfsize < S3fsCurl::GetMultipartSize()){ S3FS_PRN_WARN("specified size to ensure disk free space is smaller than multipart size, so set multipart size to it."); dfsize = S3fsCurl::GetMultipartSize(); @@ -5078,12 +5065,16 @@ int main(int argc, char* argv[]) // check free disk space if(!FdManager::IsSafeDiskSpace(NULL, S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount())){ - S3FS_PRN_EXIT("There is no enough disk space for used as cache(or temporary) directory by s3fs."); - S3fsCurl::DestroyS3fsCurl(); - s3fs_destroy_global_ssl(); - destroy_parser_xml_lock(); - delete ps3fscred; - exit(EXIT_FAILURE); + // clean cache dir and retry + S3FS_PRN_WARN("No enough disk space for ossfs, try to clean cache dir"); + FdManager::get()->CleanupCacheDir(); + if(!FdManager::IsSafeDiskSpaceWithLog(nullptr, S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount())){ + S3fsCurl::DestroyS3fsCurl(); + s3fs_destroy_global_ssl(); + destroy_parser_xml_lock(); + delete ps3fscred; + exit(EXIT_FAILURE); + } } // check readdir_optimize @@ -5101,6 +5092,11 @@ int main(int argc, char* argv[]) S3FS_PRN_INFO("Readdir optimize, flag(%d, %lld)", is_refresh_fakemeta, static_cast<long long>(readdir_check_size)); } + // try to check s3fs service + if (EXIT_SUCCESS != s3fs_check_service()) { + exit(EXIT_FAILURE); + } + s3fs_oper.getattr = s3fs_getattr; s3fs_oper.readlink = s3fs_readlink; s3fs_oper.mknod = s3fs_mknod; diff --git a/src/s3fs_global.cpp b/src/s3fs_global.cpp index d086b96..1a4a255 100644 --- a/src/s3fs_global.cpp +++ b/src/s3fs_global.cpp @@ -27,7 +27,7 @@ bool foreground = false; bool nomultipart = false; bool pathrequeststyle = false; -bool complement_stat = false; +bool complement_stat = true; bool noxmlns = true; bool direct_read = false; diff --git a/src/s3fs_help.cpp b/src/s3fs_help.cpp index 429bb9f..fbcea96 100644 --- a/src/s3fs_help.cpp +++ b/src/s3fs_help.cpp @@ -219,6 +219,15 @@ static const char help_string[] = " space is smaller than this value, ossfs do not use disk 
space\n" " as possible in exchange for the performance.\n" "\n" + " free_space_ratio (default=\"0\")\n" + " - sets min free space ratio of the disk.\n" + " The value of this option can be between 0 and 100. It will control\n" + " the size of the cache according to this ratio to ensure that the\n" + " idle ratio of the disk is greater than this value.\n" + " For example, when the disk space is 50GB, the default value will\n" + " ensure that the disk will reserve at least 50GB * 10%% = 5GB of\n" + " remaining space.\n" + "\n" " multipart_threshold (default=\"25\")\n" " - threshold, in MB, to use multipart upload instead of\n" " single-part. Must be at least 5 MB.\n" @@ -357,10 +366,10 @@ static const char help_string[] = " instance_name - The instance name of the current ossfs mountpoint.\n" " This name will be added to logging messages and user agent headers sent by ossfs.\n" "\n" - " complement_stat (complement lack of file/directory mode)\n" + " complement_stat (complement lack of file/directory mode, enabled by default)\n" " ossfs complements lack of information about file/directory mode\n" " if a file or a directory object does not have x-oss-meta-mode\n" - " header. As default, ossfs does not complements stat information\n" + " header. 
If disabled, ossfs does not complements stat information\n" " for a object, then the object will not be able to be allowed to\n" " list/modify.\n" "\n" diff --git a/src/test_curl_util.cpp b/src/test_curl_util.cpp index f979ca1..ded7ed9 100644 --- a/src/test_curl_util.cpp +++ b/src/test_curl_util.cpp @@ -150,10 +150,64 @@ void test_slist_remove() curl_slist_free_all(list); } +void test_curl_slist_sort_insert() +{ + struct curl_slist* list = NULL; + ASSERT_IS_SORTED(list); + + list = curl_slist_sort_insert(list, "2:val2"); + ASSERT_IS_SORTED(list); + ASSERT_STREQUALS("2: val2", list->data); + + list = curl_slist_sort_insert(list, "4:val4"); + ASSERT_IS_SORTED(list); + ASSERT_STREQUALS("2: val2", list->data); + + list = curl_slist_sort_insert(list, "1:val1"); + ASSERT_IS_SORTED(list); + ASSERT_STREQUALS("1: val1", list->data); + + list = curl_slist_sort_insert(list, "3:val3"); + ASSERT_IS_SORTED(list); + ASSERT_STREQUALS("1: val1", list->data); + + ASSERT_EQUALS(static_cast<size_t>(4), curl_slist_length(list)); + + // check all elements in list + int i = 1; + for(auto head = list; head != NULL; head = head->next, i ++) { + std::string element = std::to_string(i) + ": val" + std::to_string(i); + ASSERT_STREQUALS(element.c_str(), head->data); + } + curl_slist_free_all(list); +} +void test_make_md5_from_binary() { + std::string md5; + + // Normal case: Non-empty input + const char* data = "Hello, World!"; + size_t length = strlen(data); + ASSERT_TRUE(make_md5_from_binary(data, length, md5)); + + // Invalid case: Empty input + const char* empty_data = ""; + size_t empty_length = strlen(empty_data); + ASSERT_FALSE(make_md5_from_binary(empty_data, empty_length, md5)); + + // Invalid case: Null pointer + ASSERT_FALSE(make_md5_from_binary(nullptr, 0, md5)); + + // Invalid case: Empty string with non-null pointer + const char* empty_string = "\0"; + ASSERT_FALSE(make_md5_from_binary(empty_string, 1, md5)); +} + int main(int argc, char *argv[]) { test_sort_insert(); 
test_slist_remove(); + test_curl_slist_sort_insert(); + test_make_md5_from_binary(); return 0; } diff --git a/src/test_string_util.cpp b/src/test_string_util.cpp index 3bf35f6..0ec4e8a 100644 --- a/src/test_string_util.cpp +++ b/src/test_string_util.cpp @@ -150,6 +150,102 @@ void test_wtf8_encoding() ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(mixed)), mixed); } +void test_get_keyword_value() { + std::string value; + + // Normal case: Keyword exists and has a value + ASSERT_TRUE(get_keyword_value("http://example.com?keyword=value&other=param", "keyword", value)); + ASSERT_STREQUALS(value.c_str(), "value"); + + // Multiple parameters: Keyword exists among other parameters + ASSERT_TRUE(get_keyword_value("http://example.com?keyword=value&other=param&another=param2", "other", value)); + ASSERT_STREQUALS(value.c_str(), "param"); + + // Keyword at the end: Keyword is the last parameter in the URL + ASSERT_TRUE(get_keyword_value("http://example.com?other=param&another=param2&keyword=value", "keyword", value)); + ASSERT_STREQUALS(value.c_str(), "value"); + + // Single parameter: URL contains only one parameter + ASSERT_TRUE(get_keyword_value("http://example.com?keyword=value", "keyword", value)); + ASSERT_STREQUALS(value.c_str(), "value"); + + // Keyword does not exist: URL does not contain the specified keyword + ASSERT_FALSE(get_keyword_value("http://example.com?other=param", "keyword", value)); + + // No equal sign after keyword: Keyword is not followed by an equal sign + ASSERT_FALSE(get_keyword_value("http://example.com?keywordvalue&other=param", "keyword", value)); + + // Empty keyword: Keyword is an empty string + ASSERT_FALSE(get_keyword_value("http://example.com?=value&other=param", "", value)); + + // Null keyword: Keyword is NULL + ASSERT_FALSE(get_keyword_value("http://example.com?keyword=value&other=param", nullptr, value)); + + // Empty URL: URL is an empty string + ASSERT_FALSE(get_keyword_value("", "keyword", value)); + + // Null URL: URL is NULL 
(empty string in this case) + ASSERT_FALSE(get_keyword_value("", nullptr, value)); + + // No parameters: URL does not contain any parameters + ASSERT_FALSE(get_keyword_value("http://example.com", "keyword", value)); + + // No equal sign: Keyword is not followed by an equal sign + ASSERT_FALSE(get_keyword_value("http://example.com?keyword", "keyword", value)); + + // No value: Keyword is followed by an equal sign but no value + ASSERT_TRUE(get_keyword_value("http://example.com?keyword=&other=param", "keyword", value)); + ASSERT_STREQUALS(value.c_str(), ""); + + // Keyword at the beginning: Keyword is the first parameter in the URL + ASSERT_TRUE(get_keyword_value("keyword=value&other=param", "keyword", value)); + ASSERT_STREQUALS(value.c_str(), "value"); + + // Keyword in the middle: Keyword is in the middle of the URL + ASSERT_TRUE(get_keyword_value("other=param&keyword=value&another=param2", "keyword", value)); + ASSERT_STREQUALS(value.c_str(), "value"); + + // Keyword at the end: Keyword is the last parameter in the URL + ASSERT_TRUE(get_keyword_value("other=param&another=param2&keyword=value", "keyword", value)); + ASSERT_STREQUALS(value.c_str(), "value"); + + // Multiple same keywords: URL contains multiple occurrences of the same keyword + ASSERT_TRUE(get_keyword_value("http://example.com?keyword=value1&keyword=value2&other=param", "keyword", value)); + ASSERT_STREQUALS(value.c_str(), "value1"); +} + +void test_get_unixtime_from_iso8601() { + time_t unixtime; + + // Normal case: Valid ISO8601 date string + ASSERT_TRUE(get_unixtime_from_iso8601("2023-10-11T12:34:56", unixtime)); + ASSERT_EQUALS(unixtime, 1697027696); // Expected Unix timestamp for 2023-10-11T12:34:56 + + // Different valid ISO8601 date string + ASSERT_TRUE(get_unixtime_from_iso8601("2024-11-12T01:02:03", unixtime)); + ASSERT_EQUALS(unixtime, 1731373323); // Expected Unix timestamp for 2024-11-12T01:02:03 + + // Date string with zero time + 
ASSERT_TRUE(get_unixtime_from_iso8601("2023-10-11T00:00:00", unixtime)); + ASSERT_EQUALS(unixtime, 1696982400); // Expected Unix timestamp for 2023-10-11T00:00:00 +} + +void test_convert_unixtime_from_option_arg() { + time_t unixtime; + + // Normal case: Valid input with all units + ASSERT_TRUE(convert_unixtime_from_option_arg("1Y2M3D4h5m6s", unixtime)); + ASSERT_EQUALS(unixtime, 36993906); + + // Only years + ASSERT_TRUE(convert_unixtime_from_option_arg("1Y", unixtime)); + ASSERT_EQUALS(unixtime, 31536000); + + // Mixed units + ASSERT_TRUE(convert_unixtime_from_option_arg("1Y2D4h", unixtime)); + ASSERT_EQUALS(unixtime, 31723200); +} + int main(int argc, char *argv[]) { S3fsLog singletonLog; @@ -158,7 +254,9 @@ int main(int argc, char *argv[]) test_base64(); test_strtoofft(); test_wtf8_encoding(); - + test_get_keyword_value(); + test_get_unixtime_from_iso8601(); + test_convert_unixtime_from_option_arg(); return 0; } diff --git a/src/test_util.h b/src/test_util.h index 6d6fc6b..ec4db10 100644 --- a/src/test_util.h +++ b/src/test_util.h @@ -46,6 +46,14 @@ template <> void assert_equals(const std::string &x, const std::string &y, const } } +template <> void assert_equals(const time_t &x, const time_t &y, const char *file, int line) +{ + if (x != y) { + std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl; + std::cerr << std::endl; + std::exit(1); + } +} template <typename T> void assert_nequals(const T &x, const T &y, const char *file, int line) { @@ -89,7 +97,7 @@ void assert_bufequals(const char *x, size_t len1, const char *y, size_t len2, co #define ASSERT_TRUE(x) assert_equals((x), true, __FILE__, __LINE__) #define ASSERT_FALSE(x) assert_equals((x), false, __FILE__, __LINE__) -#define ASSERT_EQUALS(x, y) assert_equals((x), (y), __FILE__, __LINE__) +#define ASSERT_EQUALS(x, y) assert_equals(static_cast<decltype(y)>(x), y, __FILE__, __LINE__) #define ASSERT_NEQUALS(x, y) assert_nequals((x), (y), __FILE__, __LINE__) #define ASSERT_STREQUALS(x, y) assert_strequals((x), 
(y), __FILE__, __LINE__) #define ASSERT_BUFEQUALS(x, len1, y, len2) assert_bufequals((x), (len1), (y), (len2), __FILE__, __LINE__) diff --git a/test/integration-test-main.sh b/test/integration-test-main.sh index 3201783..4658cb8 100755 --- a/test/integration-test-main.sh +++ b/test/integration-test-main.sh @@ -352,6 +352,30 @@ function test_list { rm_test_dir } +function test_list_more { + describe "Testing list more ..." + local DIR_NAME="listdir" + mkdir ${DIR_NAME} + + # [NOTE] + # The maximum number of objects returned by ossfs listobject is 1000. + # If it exceeds this number, some flags are required for paging. + # Therefore, the number of files is set to 1100. + # + for i in $(seq 1 1100); do + touch ${DIR_NAME}/file${i} + done + + ls_cnt=$(ls ${DIR_NAME} -1 | wc -l) + + if [ "${ls_cnt}" -ne 1100 ]; then + echo "Expected 1100 file but got ${ls_cnt}" + return 1 + fi + + rm -rf ${DIR_NAME} +} + function test_remove_nonempty_directory { describe "Testing removing a non-empty directory ..." mk_test_dir @@ -578,6 +602,26 @@ function test_rename_before_close { rm -f "${TEST_TEXT_FILE}" } +function test_rename_large_file { + describe "Testing rename large file ..." + + # create a big file larger than singlepart_copy_limit + dd if=/dev/urandom of="${BIG_FILE}" bs="${BIG_FILE_BLOCK_SIZE}" count="${BIG_FILE_COUNT}" + + local md5=$(md5sum "${BIG_FILE}" | awk '{print $1}') + + mv "${BIG_FILE}" "${BIG_FILE}.new" + + local new_md5=$(md5sum "${BIG_FILE}.new" | awk '{print $1}') + + if [ "${md5}" != "${new_md5}" ]; then + echo "md5 mismatch, rename file failed" + return 1 + fi + + rm_test_file "${BIG_FILE}.new" +} + function test_multipart_upload { describe "Testing multi-part upload ..." @@ -768,6 +812,41 @@ function test_symlink { rm -f "${ALT_TEST_TEXT_FILE}" } +function test_mknod { + describe "Testing mknod ..." 
+ + local major=$((RANDOM % 256)) + local minor=$((RANDOM % 256)) + + # Attempt to create a character device file + mknod "${ALT_TEST_TEXT_FILE}" c "${major}" "${minor}" + if [ ! -c "${ALT_TEST_TEXT_FILE}" ]; then + echo "mknod failed" + return 1 + fi + + rm -f "${ALT_TEST_TEXT_FILE}" + + if [ -e "${ALT_TEST_TEXT_FILE}" ]; then + echo "rm character device file failed" + return 1 + fi + + # Attempt to create a block device file + mknod "${ALT_TEST_TEXT_FILE}" b "${major}" "${minor}" + if [ ! -b "${ALT_TEST_TEXT_FILE}" ]; then + echo "mknod failed" + return 1 + fi + + rm -f "${ALT_TEST_TEXT_FILE}" + + if [ -e "${ALT_TEST_TEXT_FILE}" ]; then + echo "rm block device file failed" + return 1 + fi +} + function test_extended_attributes { describe "Testing extended attributes ..." @@ -791,6 +870,44 @@ function test_extended_attributes { rm_test_file } +function test_all_extended_attributes { + describe "Testing all extended attributes ..." + + rm_test_file + mk_test_file + + # set value + set_xattr key1 value1 "${TEST_TEXT_FILE}" + if ! get_xattr key1 "${TEST_TEXT_FILE}" | grep -q '^value1$'; then + echo "The value of key1 is not 'value1' or the attribute does not exist." + return 1 + fi + + # append value + set_xattr key2 value2 "${TEST_TEXT_FILE}" + if ! get_xattr key2 "${TEST_TEXT_FILE}" | grep -q '^value2$'; then + echo "The value of key2 is not 'value2' or the attribute does not exist." + return 1 + fi + + # list all value + set_xattr key3 value3 "${TEST_TEXT_FILE}" + ls_cnt=$(list_xattr "${TEST_TEXT_FILE}" | sed '1d;$d' | wc -l) + if [ "$ls_cnt" -ne 3 ]; then + echo "The number of attributes is not 3." + return 1 + fi + + # remove value + del_xattr key1 "${TEST_TEXT_FILE}" + if get_xattr key1 "${TEST_TEXT_FILE}" | grep -q '^value1$'; then + echo "The value of key1 is still exist, del_xattr failed." + return 1 + fi + + rm_test_file +} + function test_mtime_file { describe "Testing mtime preservation function ..." 
@@ -1466,6 +1583,37 @@ function test_truncate_cache() { rm -rf $(seq 2) } +function test_truncate_symlink_cache { + describe "Test truncate symlink cache ..." + + local SRC_DIR="source_dir" + local DST_DIR="destination_dir" + + mkdir "${SRC_DIR}" + mkdir "${DST_DIR}" + + # [NOTE] + # create more symlink files than -o max_stat_cache_size. + # ossfs truncate symlink files require the number of + # symlink files to be greater than max_stat_cache_size. + # Set max_stat_cache_size=100 in the mount parameters. + # Therefore, 110 symlink files are created here. + # + for i in $(seq 110); do + touch "${SRC_DIR}/file${i}" + ln -s "${SRC_DIR}/file${i}" "${DST_DIR}/file${i}" + done + + ls_cnt=$(ls -1 "${DST_DIR}" | wc -l) + if [ "${ls_cnt}" -ne 110 ]; then + echo "Unexpected ls count: ${ls_cnt}, expected: 110. symlink failed" + return 1; + fi + + rm -rf "${SRC_DIR}" + rm -rf "${DST_DIR}" +} + function test_cache_file_stat() { describe "Test cache file stat ..." @@ -2581,6 +2729,115 @@ function test_mix_direct_read_with_skip { rm -f "${TEMP_DIR}/${TEST_FILE}" } +function test_check_cache_sigusr1 { + describe "Testing check cache sigusr1 ..." + + # create the test file again + mk_test_file + + # trigger the check cache + kill -SIGUSR1 "${OSSFS_PID}" + + #check the cache + error_files=$(cat ${CHECK_CACHE_FILE} | grep "Detected error files" | awk '{print $4}' | tail -n -1) + + if [[ -n "$error_files" && "$error_files" != "0" ]]; then + echo "check cache failed, there are $error_files error files" + return 1 + fi + + error_directories=$(cat ${CHECK_CACHE_FILE} | grep "Detected error directories" | awk '{print $4}' | tail -n -1) + + if [[ -n "$error_directories" && "$error_directories" != "0" ]]; then + echo "check cache failed, there are $error_directories error directories" + return 1 + fi + + # clean up + rm_test_file +} + +function test_reopen_log_with_sighup { + describe "Testing reopen log ..." + + rm -rf "${LOGFILE}" + + # [NOTE] + # this signal will trigger reopen log. 
+ # it will touch a new log file if old log file is deleted. + kill -SIGHUP "${OSSFS_PID}" + + sleep 1 + + if [ ! -e "${LOGFILE}" ]; then + echo "reopen log failed" + return 1 + fi + } + +function test_change_log_level_with_sigusr2 { + describe "Testing change log level ..." + + # [NOTE] + # The log level can be changed by sending a SIGUSR2 signal to the ossfs process. + # The log level changes through the following sequence: + # DBG -> CRT -> ERR -> WAN -> INF -> DBG + # The current log level is DBG, and we need to cycle through the levels to return to DBG. + # Therefore, 5 signals are sent to complete one full cycle. + # + kill -SIGUSR2 "${OSSFS_PID}" + kill -SIGUSR2 "${OSSFS_PID}" + kill -SIGUSR2 "${OSSFS_PID}" + kill -SIGUSR2 "${OSSFS_PID}" + kill -SIGUSR2 "${OSSFS_PID}" + + # Capture the log file content. + cat "${LOGFILE}" + + # Verify that the log level has changed from INF back to DBG. + if grep -q "change debug level" "${LOGFILE}"; then + echo "Log level change successful." + else + echo "change log level failed" + return 1 + fi +} + +function test_add_head { + describe "Testing add_head ..." + + local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}.bz2" + + touch "${TEST_TEXT_FILE}.bz2" + + if ! aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "${OBJECT_NAME}" | grep -q "bzip2"; then + echo "The Content-Encoding of ${OBJECT_NAME} is not bzip2, add head failed" + return 1 + fi + + rm -f "${TEST_TEXT_FILE}.bz2" +} + +function test_statvfs { + describe "Testing the free/available size on mount point(statvfs)..." + + # [NOTE] + # The df command result format is different between Linux and macos, + # but the order of Total/Used/Available size is the same. 
+ # + local MOUNTPOINT_DIR; MOUNTPOINT_DIR=$(cd ..; pwd) + local DF_RESULT; DF_RESULT=$(df "${MOUNTPOINT_DIR}" 2>/dev/null | tail -n +2) + local TOTAL_SIZE; TOTAL_SIZE=$(echo "${DF_RESULT}" | awk '{print $2}') + local USED_SIZE; USED_SIZE=$(echo "${DF_RESULT}" | awk '{print $3}') + local AVAIL_SIZE; AVAIL_SIZE=$(echo "${DF_RESULT}" | awk '{print $4}') + + df -h > /dev/null + if [ -z "${TOTAL_SIZE}" ] || [ -z "${AVAIL_SIZE}" ] || [ -z "${USED_SIZE}" ] || [ "${TOTAL_SIZE}" = "0" ] || [ "${AVAIL_SIZE}" = "0" ] || [ "${TOTAL_SIZE}" != "${AVAIL_SIZE}" ] || [ "${USED_SIZE}" != "0" ]; then + echo "The result of df command is wrong: Total=${TOTAL_SIZE}, Used=${USED_SIZE}, Available=${AVAIL_SIZE}" + return 1 + fi +} + function add_all_tests { # shellcheck disable=SC2009 if ps u -p "${OSSFS_PID}" | grep -q use_cache; then @@ -2604,6 +2861,7 @@ function add_all_tests { add_tests test_redirects add_tests test_mkdir_rmdir add_tests test_list + add_tests test_list_more add_tests test_remove_nonempty_directory add_tests test_external_directory_creation add_tests test_external_modification @@ -2613,6 +2871,7 @@ function add_all_tests { add_tests test_update_metadata_external_small_object add_tests test_update_metadata_external_large_object add_tests test_rename_before_close + add_tests test_rename_large_file add_tests test_multipart_upload add_tests test_multipart_copy add_tests test_multipart_mix @@ -2620,6 +2879,7 @@ function add_all_tests { add_tests test_special_characters add_tests test_hardlink add_tests test_symlink + add_tests test_mknod add_tests test_update_chmod_opened_file add_tests test_rm_rf_dir @@ -2638,6 +2898,12 @@ function add_all_tests { add_tests test_write_multiple_offsets add_tests test_write_multiple_offsets_backwards add_tests test_content_type + + if ps u -p "${OSSFS_PID}" | grep -q max_stat_cache_size; then + add_tests test_truncate_cache + add_tests test_truncate_symlink_cache + fi + add_tests test_truncate_cache add_tests test_upload_sparsefile 
add_tests test_mix_upload_entities @@ -2664,6 +2930,7 @@ function add_all_tests { add_tests test_chmod add_tests test_chown add_tests test_extended_attributes + add_tests test_all_extended_attributes add_tests test_mtime_file add_tests test_update_time_chmod @@ -2709,6 +2976,18 @@ function add_all_tests { if ps u -p "${OSSFS_PID}" | grep -q direct_read_local_file_cache_size_mb; then add_tests test_mix_direct_read fi + + if ps u -p "${OSSFS_PID}" | grep -q set_check_cache_sigusr1; then + add_tests test_check_cache_sigusr1 + fi + + if ps u -p "${OSSFS_PID}" | grep -q logfile; then + add_tests test_reopen_log_with_sighup + add_tests test_change_log_level_with_sigusr2 + fi + + add_tests test_add_head + add_tests test_statvfs } init_suite diff --git a/test/small-integration-test.sh b/test/small-integration-test.sh index 9d08bfd..47e109c 100755 --- a/test/small-integration-test.sh +++ b/test/small-integration-test.sh @@ -29,6 +29,8 @@ set -o pipefail source integration-test-common.sh CACHE_DIR="/tmp/ossfs-cache" +LOGFILE="/tmp/ossfs.log" +CHECK_CACHE_FILE="/tmp/ossfs-cache-check.log" rm -rf "${CACHE_DIR}" mkdir "${CACHE_DIR}" @@ -39,21 +41,24 @@ FAKE_FREE_DISK_SIZE=200 ENSURE_DISKFREE_SIZE=10 BACKWARD_CHUNKS=1 DIRECT_READ_LOCAL_FILE_CACHE_SIZE_MB=2048 +AHBE_CONFIG="./sample_ahbe.conf" export CACHE_DIR +export LOGFILE +export CHECK_CACHE_FILE export ENSURE_DISKFREE_SIZE export DIRECT_READ_LOCAL_FILE_CACHE_SIZE_MB if [ -n "${ALL_TESTS}" ]; then FLAGS=( - "use_cache=${CACHE_DIR} -o ensure_diskfree=${ENSURE_DISKFREE_SIZE} -o fake_diskfree=${FAKE_FREE_DISK_SIZE}" + "use_cache=${CACHE_DIR} -o ensure_diskfree=${ENSURE_DISKFREE_SIZE} -o fake_diskfree=${FAKE_FREE_DISK_SIZE} -o del_cache" enable_content_md5 enable_noobj_cache - "max_stat_cache_size=100" + "max_stat_cache_size=100 -o stat_cache_expire=-1" nocopyapi nomultipart notsup_compat_dir sigv1 - "singlepart_copy_limit=10" # limit size to exercise multipart code paths + "singlepart_copy_limit=10 -o noshallowcopyapi" # limit 
size to exercise multipart code paths use_sse use_sse=kms listobjectsv2 @@ -67,6 +72,10 @@ if [ -n "${ALL_TESTS}" ]; then "default_acl=private" "direct_read -o direct_read_local_file_cache_size_mb=${DIRECT_READ_LOCAL_FILE_CACHE_SIZE_MB}" "sigv4 -o region=${OSS_REGION}" + ahbe_conf=${AHBE_CONFIG} + "use_cache=${CACHE_DIR} -o del_cache -o set_check_cache_sigusr1=${CHECK_CACHE_FILE} -o logfile=${LOGFILE} -o check_cache_dir_exist" + "max_dirty_data=50" + "use_cache=${CACHE_DIR} -o free_space_ratio=1 -o del_cache" ) else FLAGS=( diff --git a/test/test-utils.sh b/test/test-utils.sh index 965ba2d..380fe40 100644 --- a/test/test-utils.sh +++ b/test/test-utils.sh @@ -79,6 +79,14 @@ function get_xattr() { fi } +function list_xattr() { + if [ "$(uname)" = "Darwin" ]; then + xattr -l "$1" + else + getfattr -d -m . --absolute-names "$1" + fi +} + function set_xattr() { if [ "$(uname)" = "Darwin" ]; then xattr -w "$1" "$2" "$3" diff --git a/test/test_policies/policy.json b/test/test_policies/policy.json new file mode 100644 index 0000000..b72b7f6 --- /dev/null +++ b/test/test_policies/policy.json @@ -0,0 +1,17 @@ +{ + "Version": "1", + "Statement": [{ + "Effect": "Allow", + "Action": [ + "oss:*" + ], + "Principal": [ + "*" + ], + "Resource": [ + "acs:oss:*:************:test-policy-bucket/dir", + "acs:oss:*:************:test-policy-bucket/dir/", + "acs:oss:*:************:test-policy-bucket/dir/*" + ] + }] +} \ No newline at end of file diff --git a/test/test_policies/policy_noaccess_prefix.json b/test/test_policies/policy_noaccess_prefix.json new file mode 100644 index 0000000..e4433c1 --- /dev/null +++ b/test/test_policies/policy_noaccess_prefix.json @@ -0,0 +1,60 @@ +{ + "Version": "1", + "Statement": [{ + "Effect": "Deny", + "Action": [ + "oss:RestoreObject", + "oss:ListObjects", + "oss:AbortMultipartUpload", + "oss:PutObjectAcl", + "oss:GetObjectAcl", + "oss:ListParts", + "oss:DeleteObject", + "oss:PutObject", + "oss:GetObject", + "oss:GetVodPlaylist", + 
"oss:PostVodPlaylist", + "oss:PublishRtmpStream", + "oss:ListObjectVersions", + "oss:GetObjectVersion", + "oss:GetObjectVersionAcl", + "oss:RestoreObjectVersion" + ], + "Principal": [ + "*" + ], + "Resource": [ + "acs:oss:*:**************:test-policy-bucket/dir/*" + ] + },{ + "Effect": "Allow", + "Action": [ + "oss:ListObjects", + "oss:GetObject" + ], + "Principal": [ + "*" + ], + "Resource": [ + "acs:oss:*:************:test-policy-bucket" + ], + "Condition": { + "StringLike": { + "oss:Prefix": [ + "*" + ] + } + } + },{ + "Effect": "Allow", + "Action": [ + "oss:*" + ], + "Principal": [ + "*" + ], + "Resource": [ + "acs:oss:*:************:test-policy-bucket/*" + ] + }] +} \ No newline at end of file diff --git a/test/test_policy_mount.sh b/test/test_policy_mount.sh new file mode 100644 index 0000000..eb7e354 --- /dev/null +++ b/test/test_policy_mount.sh @@ -0,0 +1,236 @@ +# This script is used to test whether the ossfs mount meets expectations under different bucket policy settings. +# To use this script, you need to know how to set the corresponding bucket policy. +# see https://help.aliyun.com/zh/oss/user-guide/use-bucket-policy-to-grant-permission-to-access-oss/?spm=a2c4g.11186623.0.0.1b814b76rHJgy5#concept-2071378 +# You need to modify the policy.json file + +# # This script runs all tests by default. You can run it in the following way +# Usage: ./test_policy_mount.sh [bucket] [mp] [ak] [sk] [endpoint] [subdir] +# example: ./test_policy_mount.sh your_bucket /mnt ak sk http://oss-cn-hangzhou.aliyuncs.com dir + +# This script can also be used to run a single test. 
You can run it in the following way +# Usage: ./test_policy_mount.sh [bucket] [mp] [ak] [sk] [endpoint] [subdir] [case] +# example: ./test_policy_mount.sh your_bucket /mnt ak sk http://oss-cn-hangzhou.aliyuncs.com dir test_no_such_bucket + +#!/bin/bash + +bucket_name=$1 +mp=$2 +ak=$3 +sk=$4 +endpoint=$5 +subdir_name=$6 +case=$7 + +function drop_cache { + sync + (echo 3 | tee /proc/sys/vm/drop_caches) > /dev/null +} + +function run_test { + local test_case=$1 + shift + drop_cache + echo "testing $test_case " + + if [ $# != 0 ]; then + $test_case $@ + test_status=$? + else + $test_case + test_status=$? + fi + + if [ $test_status -eq 0 ]; then + echo "test $test_case passed" + else + echo "test $test_case failed" + fi +} + +function mount { + bucket=$1 + subdir=$2 + if [ -z "$subdir" ]; then + ossfs $bucket $mp -ourl=$endpoint -odbglevel=dbg -ologfile=/root/ossfs_policy.log + else + ossfs $bucket:/$subdir $mp -ourl=$endpoint -odbglevel=dbg -ologfile=/root/ossfs_policy.log + fi +} + +function unmount { + umount $mp 2> /dev/null + rm -rf /root/ossfs_policy.log +} + +function add_all_tests { + run_test "test_no_such_bucket" + run_test "test_bucket_no_policy_with_subdir_no_policy" + run_test "test_bucket_no_policy_with_subdir_has_policy" + run_test "test_exitcode" + run_test "test_create_without_perm" +} + +function install_ossutil { + if ! [ -x "$(command -v ossutil)" ]; then + curl https://gosspublic.alicdn.com/ossutil/install.sh > install_ossutil.sh + bash install_ossutil.sh + if ! [ -x "$(command -v ossutil)" ]; then + echo "Failed to install ossutil" + exit 1 + fi + fi +} + +function ossutil_cmd { + ossutil -i $ak -k $sk -e $endpoint $@ +} + +function test_no_such_bucket { + if ps -ef | grep -v "grep" | grep -q "ossfs"; then + unmount + fi + + # mount temp bucket + tmp_bucket="tmp_bucket_$(date +%s)" + mount $tmp_bucket + sleep 1 + if ps -ef | grep -v "grep" | grep -q "ossfs"; then + echo "ossfs is running, not expected" + return 1 + fi + + if ! 
grep -q "NoSuchBucket" /root/ossfs_policy.log; then + echo "not found NoSuchBucket in log, not expected" + fi + + unmount + # mount temp subdir + mount $tmp_bucket $subdir_name + sleep 1 + if ps -ef | grep -v "grep" | grep -q "ossfs"; then + echo "ossfs is running, not expected" + return 1 + fi + + if ! grep -q "NoSuchBucket" /root/ossfs_policy.log; then + echo "not found NoSuchBucket in log, not expected" + fi + + unmount +} + +function test_bucket_no_policy_with_subdir_no_policy { + if ps -ef | grep -v "grep" | grep -q "ossfs"; then + unmount + fi + + # test mount bucket no policy + mount $bucket_name + sleep 1 + if ! grep -q "AccessDenied" /root/ossfs_policy.log; then + echo "not found AccessDenied in log, not expected" + return 1 + fi + unmount + + # test subdir no policy + subdir=tmp_subdir_$(date +%s) + + # test subdir does not exist + mount $bucket_name $subdir + sleep 1 + if ! grep -q "AccessDenied" /root/ossfs_policy.log; then + echo "not found AccessDenied in log, not expected" + return 1 + fi + unmount + + # test subdir exist + ossutil_cmd mkdir oss://$bucket_name/$subdir_name + mount $bucket_name $subdir + sleep 1 + if ! grep -q "AccessDenied" /root/ossfs_policy.log; then + echo "not found AccessDenied in log, not expected" + return 1 + fi + unmount + +} + +function test_bucket_no_policy_with_subdir_has_policy { + if ps -ef | grep -v "grep" | grep -q "ossfs"; then + unmount + fi + + # test mount bucket no policy but has subdir policy + ossutil_cmd bucket-policy --method put oss://$bucket_name test_policies/policy.json + + # test subdir does not exist + ossutil_cmd rm -r -f oss://$bucket_name/$subdir_name + + mount $bucket_name $subdir_name + sleep 1 + if ! grep -q "NoSuchKey" /root/ossfs_policy.log; then + echo "not found NoSuchKey in log, not expected" + return 1 + fi + + if ! 
ps -ef | grep -v "grep" | grep -q "ossfs"; then + echo "ossfs is not running, not expected" + return 1 + fi + unmount + + # test subdir exist + ossutil_cmd mkdir oss://$bucket_name/$subdir_name + + mount $bucket_name $subdir_name + sleep 1 + if ! ps -ef | grep -v "grep" | grep -q "ossfs"; then + echo "ossfs is not running, not expected" + return 1 + fi + unmount +} + +function test_exitcode { + # set policy.json as no access to the bucket + ossutil_cmd bucket-policy --method put oss://$bucket_name test_policies/policy.json + + mount $bucket_name $subdir_name + if [ $? == 0 ]; then + return 1 + fi + + unmount +} + +function test_create_without_perm { + # set policy_noaccess_prefix.json: + # full access to the bucket + # no access to the prefix/* + ossutil_cmd bucket-policy --method put oss://$bucket_name test_policies/policy_noaccess_prefix.json + + mount $bucket_name + sleep 1 + + res=`ps -ef | grep ossfs | grep -v grep | grep "$mp"` + if [ -z "$res" ]; then + echo 1 + return 1 + fi + + res=`mkdir -p $mp/dir/whatever 2>/dev/stdout` + if ! echo "$res" | grep "not permitted"; then + return 1 + fi + + unmount +} + +install_ossutil +if [ -z "$case" ]; then + add_all_tests +else + run_test $case +fi \ No newline at end of file