Skip to content

Commit

Permalink
Merge branch 'main' of github.com:apple/foundationdb into jfu-grv-cac…
Browse files Browse the repository at this point in the history
…he-multi-threaded
  • Loading branch information
sfc-gh-jfu committed Mar 29, 2022
2 parents bdbe78e + 8a68781 commit 53d252f
Show file tree
Hide file tree
Showing 131 changed files with 3,833 additions and 677 deletions.
4 changes: 2 additions & 2 deletions README.md
Expand Up @@ -35,7 +35,7 @@ The official docker image for building is [`foundationdb/build`](https://hub.doc
To build outside the official docker image you'll need at least these dependencies:

1. Install cmake Version 3.13 or higher [CMake](https://cmake.org/)
1. Install [Mono](http://www.mono-project.com/download/stable/)
1. Install [Mono](https://www.mono-project.com/download/stable/)
1. Install [Ninja](https://ninja-build.org/) (optional, but recommended)

If compiling for local development, please set `-DUSE_WERROR=ON` in
Expand Down Expand Up @@ -177,7 +177,7 @@ Under Windows, only Visual Studio with ClangCl is supported
1. Install [Python](https://www.python.org/downloads/) if is not already installed by Visual Studio
1. (Optional) Install [OpenJDK 11](https://developers.redhat.com/products/openjdk/download) to build Java bindings
1. (Optional) Install [OpenSSL 3.x](https://slproweb.com/products/Win32OpenSSL.html) to build with TLS support
1. (Optional) Install [WIX Toolset](http://wixtoolset.org/) to build Windows installer
1. (Optional) Install [WIX Toolset](https://wixtoolset.org/) to build Windows installer
1. `mkdir build && cd build`
1. `cmake -G "Visual Studio 16 2019" -A x64 -T ClangCl <PATH_TO_FOUNDATIONDB_SOURCE>`
1. `msbuild /p:Configuration=Release foundationdb.sln`
Expand Down
14 changes: 14 additions & 0 deletions bindings/bindingtester/bindingtester.py
Expand Up @@ -202,6 +202,7 @@ def __init__(self, args):
self.args.types = list(reduce(lambda t1, t2: filter(t1.__contains__, t2), map(lambda tester: tester.types, self.testers)))

self.args.no_directory_snapshot_ops = self.args.no_directory_snapshot_ops or any([not tester.directory_snapshot_ops_enabled for tester in self.testers])
self.args.no_tenants = self.args.no_tenants or any([not tester.tenants_enabled for tester in self.testers])

def print_test(self):
test_instructions = self._generate_test()
Expand Down Expand Up @@ -282,6 +283,17 @@ def _generate_test(self):
def _insert_instructions(self, test_instructions):
    """Clear the database (including any tenants) and insert the generated
    test instructions so the testers can execute them."""
    util.get_logger().info('\nInserting test into database...')
    del self.db[:]

    # Remove any tenants left over from a previous run.  The tenant map
    # lives in the special key space, which rejects writes unless the
    # special-key-space-enable-writes transaction option is set.
    while True:
        tr = self.db.create_transaction()
        try:
            tr.options.set_special_key_space_enable_writes()
            # Clearing this range deletes every entry in the tenant map.
            del tr[b'\xff\xff/management/tenant_map/' : b'\xff\xff/management/tenant_map0']
            tr.commit().wait()
            break
        except fdb.FDBError as e:
            # Standard FDB retry loop: on_error handles backoff and
            # re-raises when the error is not retryable.
            tr.on_error(e).wait()

    # Each test thread's instructions live under their own subspace.
    for subspace, thread in test_instructions.items():
        thread.insert_operations(self.db, subspace)

Expand Down Expand Up @@ -445,6 +457,8 @@ def parse_args(argv):

parser.add_argument('--no-directory-snapshot-ops', action='store_true', help='Disables snapshot operations for directory instructions.')

parser.add_argument('--no-tenants', action='store_true', help='Disables tenant operations.')

return parser.parse_args(argv)


Expand Down
7 changes: 4 additions & 3 deletions bindings/bindingtester/known_testers.py
Expand Up @@ -26,7 +26,7 @@


class Tester:
def __init__(self, name, cmd, max_int_bits=64, min_api_version=0, max_api_version=MAX_API_VERSION, threads_enabled=True, types=COMMON_TYPES, directory_snapshot_ops_enabled=True):
def __init__(self, name, cmd, max_int_bits=64, min_api_version=0, max_api_version=MAX_API_VERSION, threads_enabled=True, types=COMMON_TYPES, directory_snapshot_ops_enabled=True, tenants_enabled=False):
self.name = name
self.cmd = cmd
self.max_int_bits = max_int_bits
Expand All @@ -35,6 +35,7 @@ def __init__(self, name, cmd, max_int_bits=64, min_api_version=0, max_api_versio
self.threads_enabled = threads_enabled
self.types = types
self.directory_snapshot_ops_enabled = directory_snapshot_ops_enabled
self.tenants_enabled = tenants_enabled

def supports_api_version(self, api_version):
    """Return True iff *api_version* lies within this tester's supported range (inclusive)."""
    return self.min_api_version <= api_version <= self.max_api_version
Expand All @@ -57,8 +58,8 @@ def _absolute_path(path):

# We could set min_api_version lower on some of these if the testers were updated to support them
testers = {
'python': Tester('python', 'python ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES),
'python3': Tester('python3', 'python3 ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES),
'python': Tester('python', 'python ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES, tenants_enabled=True),
'python3': Tester('python3', 'python3 ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES, tenants_enabled=True),
'ruby': Tester('ruby', _absolute_path('ruby/tests/tester.rb'), 2040, 23, MAX_API_VERSION),
'java': Tester('java', _java_cmd + 'StackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
'java_async': Tester('java', _java_cmd + 'AsyncStackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
Expand Down
77 changes: 77 additions & 0 deletions bindings/bindingtester/spec/tenantTester.md
@@ -0,0 +1,77 @@
Overview
--------

Tenant testing is an optional extension to the core binding tester that enables
testing of the tenant API. This testing is enabled by adding some additional
instructions and modifying the behavior of some existing instructions.

Additional State and Initialization
-----------------------------------

Your tester should store an additional piece of state tracking the active tenant
that is to be used to create transactions. This tenant must support an unset
state; when no tenant is set, transactions are created directly on the database.

New Instructions
----------------

The tenant API introduces some new operations:

#### TENANT_CREATE

Pops the top item off of the stack as TENANT_NAME. Creates a new tenant
in the database with the name TENANT_NAME. May optionally push a future
onto the stack.

#### TENANT_DELETE

Pops the top item off of the stack as TENANT_NAME. Deletes the tenant with
the name TENANT_NAME from the database. May optionally push a future onto
the stack.

#### TENANT_SET_ACTIVE

Pops the top item off of the stack as TENANT_NAME. Opens the tenant with
name TENANT_NAME and stores it as the active tenant.

#### TENANT_CLEAR_ACTIVE

Unsets the active tenant.

Updates to Existing Instructions
--------------------------------

Some existing operations in the binding tester will have slightly modified
behavior when tenants are enabled.

#### NEW_TRANSACTION

When creating a new transaction, the active tenant should be used. If no active
tenant is set, then the transaction should be created as normal using the
database.

#### _TENANT suffix

Similar to the _DATABASE suffix, an operation with the _TENANT suffix indicates
that the operation should be performed on the current active tenant object. If
there is no active tenant, then the operation should be performed on the database
as if _DATABASE was specified. In any case where the operation suffixed with
_DATABASE is allowed to push a future onto the stack, the same operation suffixed
with _TENANT is also allowed to push a future onto the stack.

If your binding does not support operations directly on a tenant object, you should
simulate it using an anonymous transaction. Remember that set and clear operations
must immediately commit (with appropriate retry behavior!).

Operations that can include the _TENANT suffix are:

GET_TENANT
GET_KEY_TENANT
GET_RANGE_TENANT
GET_RANGE_STARTS_WITH_TENANT
GET_RANGE_SELECTOR_TENANT
SET_TENANT
CLEAR_TENANT
CLEAR_RANGE_TENANT
CLEAR_RANGE_STARTS_WITH_TENANT
ATOMIC_OP_TENANT
38 changes: 35 additions & 3 deletions bindings/bindingtester/tests/api.py
Expand Up @@ -58,6 +58,7 @@ def setup(self, args):
self.outstanding_ops = []
self.random = test_util.RandomGenerator(args.max_int_bits, args.api_version, args.types)
self.api_version = args.api_version
self.allocated_tenants = set()

def add_stack_items(self, num):
self.stack_size += num
Expand Down Expand Up @@ -137,6 +138,12 @@ def wait_for_reads(self, instructions):
test_util.to_front(instructions, self.stack_size - read[0])
instructions.append('WAIT_FUTURE')

def choose_tenant(self, new_tenant_probability):
    """Pick a tenant name for the next tenant operation.

    A fresh random name is generated with probability
    ``new_tenant_probability`` (and always when no tenant has been
    allocated yet); otherwise a previously allocated name is reused.
    """
    must_create = not self.allocated_tenants
    if must_create or random.random() < new_tenant_probability:
        # Fresh name: up to 30 random characters (may be empty).
        return self.random.random_string(random.randint(0, 30))
    # Reuse an existing tenant so later operations can hit real tenants.
    return random.choice(list(self.allocated_tenants))

def generate(self, args, thread_number):
instructions = InstructionSet()

Expand All @@ -158,6 +165,7 @@ def generate(self, args, thread_number):
write_conflicts = ['WRITE_CONFLICT_RANGE', 'WRITE_CONFLICT_KEY', 'DISABLE_WRITE_CONFLICT']
txn_sizes = ['GET_APPROXIMATE_SIZE']
storage_metrics = ['GET_ESTIMATED_RANGE_SIZE', 'GET_RANGE_SPLIT_POINTS']
tenants = ['TENANT_CREATE', 'TENANT_DELETE', 'TENANT_SET_ACTIVE', 'TENANT_CLEAR_ACTIVE']

op_choices += reads
op_choices += mutations
Expand All @@ -173,6 +181,9 @@ def generate(self, args, thread_number):
op_choices += txn_sizes
op_choices += storage_metrics

if not args.no_tenants:
op_choices += tenants

idempotent_atomic_ops = ['BIT_AND', 'BIT_OR', 'MAX', 'MIN', 'BYTE_MIN', 'BYTE_MAX']
atomic_ops = idempotent_atomic_ops + ['ADD', 'BIT_XOR', 'APPEND_IF_FITS']

Expand All @@ -195,7 +206,7 @@ def generate(self, args, thread_number):

# print 'Adding instruction %s at %d' % (op, index)

if args.concurrency == 1 and (op in database_mutations):
if args.concurrency == 1 and (op in database_mutations or op in ['TENANT_CREATE', 'TENANT_DELETE']):
self.wait_for_reads(instructions)
test_util.blocking_commit(instructions)
self.can_get_commit_version = False
Expand Down Expand Up @@ -570,18 +581,39 @@ def generate(self, args, thread_number):
instructions.push_args(key1, key2, chunkSize)
instructions.append(op)
self.add_strings(1)

elif op == 'TENANT_CREATE':
tenant_name = self.choose_tenant(0.8)
self.allocated_tenants.add(tenant_name)
instructions.push_args(tenant_name)
instructions.append(op)
self.add_strings(1)
elif op == 'TENANT_DELETE':
tenant_name = self.choose_tenant(0.2)
if tenant_name in self.allocated_tenants:
self.allocated_tenants.remove(tenant_name)
instructions.push_args(tenant_name)
instructions.append(op)
self.add_strings(1)
elif op == 'TENANT_SET_ACTIVE':
tenant_name = self.choose_tenant(0.8)
instructions.push_args(tenant_name)
instructions.append(op)
elif op == 'TENANT_CLEAR_ACTIVE':
instructions.append(op)
else:
assert False, 'Unknown operation: ' + op

if read_performed and op not in database_reads:
self.outstanding_ops.append((self.stack_size, len(instructions) - 1))

if args.concurrency == 1 and (op in database_reads or op in database_mutations):
if args.concurrency == 1 and (op in database_reads or op in database_mutations or op in ['TENANT_CREATE', 'TENANT_DELETE']):
instructions.append('WAIT_FUTURE')

instructions.begin_finalization()

if not args.no_tenants:
instructions.append('TENANT_CLEAR_ACTIVE')

if args.concurrency == 1:
self.wait_for_reads(instructions)
test_util.blocking_commit(instructions)
Expand Down
3 changes: 3 additions & 0 deletions bindings/c/CMakeLists.txt
Expand Up @@ -137,6 +137,7 @@ if(NOT WIN32)
add_executable(fdb_c_performance_test test/performance_test.c test/test.h)
add_executable(fdb_c_ryw_benchmark test/ryw_benchmark.c test/test.h)
add_executable(fdb_c_txn_size_test test/txn_size_test.c test/test.h)
add_executable(fdb_c_client_memory_test test/client_memory_test.cpp test/unit/fdb_api.cpp test/unit/fdb_api.hpp)
add_executable(mako ${MAKO_SRCS})
add_executable(fdb_c_setup_tests test/unit/setup_tests.cpp)
add_executable(fdb_c_unit_tests ${UNIT_TEST_SRCS})
Expand All @@ -147,10 +148,12 @@ if(NOT WIN32)
strip_debug_symbols(fdb_c_performance_test)
strip_debug_symbols(fdb_c_ryw_benchmark)
strip_debug_symbols(fdb_c_txn_size_test)
strip_debug_symbols(fdb_c_client_memory_test)
endif()
target_link_libraries(fdb_c_performance_test PRIVATE fdb_c Threads::Threads)
target_link_libraries(fdb_c_ryw_benchmark PRIVATE fdb_c Threads::Threads)
target_link_libraries(fdb_c_txn_size_test PRIVATE fdb_c Threads::Threads)
target_link_libraries(fdb_c_client_memory_test PRIVATE fdb_c Threads::Threads)

add_dependencies(fdb_c_setup_tests doctest)
add_dependencies(fdb_c_unit_tests doctest)
Expand Down
3 changes: 2 additions & 1 deletion bindings/c/fdb_c.cpp
Expand Up @@ -851,9 +851,10 @@ extern "C" DLLEXPORT FDBResult* fdb_transaction_read_blob_granules(FDBTransactio
context.get_load_f = granule_context.get_load_f;
context.free_load_f = granule_context.free_load_f;
context.debugNoMaterialize = granule_context.debugNoMaterialize;
context.granuleParallelism = granule_context.granuleParallelism;

Optional<Version> rv;
if (readVersion != invalidVersion) { rv = readVersion; }
if (readVersion != latestVersion) { rv = readVersion; }

return (FDBResult*)(TXN(tr)->readBlobGranules(range, beginVersion, rv, context).extractPtr()););
}
Expand Down
16 changes: 12 additions & 4 deletions bindings/c/foundationdb/fdb_c.h
Expand Up @@ -176,7 +176,12 @@ typedef struct readgranulecontext {
void* userContext;

/* Returns a unique id for the load. Asynchronous to support queueing multiple in parallel. */
int64_t (*start_load_f)(const char* filename, int filenameLength, int64_t offset, int64_t length, void* context);
int64_t (*start_load_f)(const char* filename,
int filenameLength,
int64_t offset,
int64_t length,
int64_t fullFileLength,
void* context);

/* Returns data for the load. Pass the loadId returned by start_load_f */
uint8_t* (*get_load_f)(int64_t loadId, void* context);
Expand All @@ -187,6 +192,9 @@ typedef struct readgranulecontext {
/* Set this to true for testing if you don't want to read the granule files,
just do the request to the blob workers */
fdb_bool_t debugNoMaterialize;

/* Number of granules to load in parallel */
int granuleParallelism;
} FDBReadBlobGranuleContext;

DLLEXPORT void fdb_future_cancel(FDBFuture* f);
Expand Down Expand Up @@ -432,15 +440,15 @@ DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_range_split_points(F
int end_key_name_length,
int64_t chunk_size);

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_blob_granule_ranges(FDBTransaction* db,
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_blob_granule_ranges(FDBTransaction* tr,
uint8_t const* begin_key_name,
int begin_key_name_length,
uint8_t const* end_key_name,
int end_key_name_length);

/* InvalidVersion (-1) for readVersion means get read version from transaction
/* LatestVersion (-2) for readVersion means get read version from transaction
Separated out as optional because BG reads can support longer-lived reads than normal FDB transactions */
DLLEXPORT WARN_UNUSED_RESULT FDBResult* fdb_transaction_read_blob_granules(FDBTransaction* db,
DLLEXPORT WARN_UNUSED_RESULT FDBResult* fdb_transaction_read_blob_granules(FDBTransaction* tr,
uint8_t const* begin_key_name,
int begin_key_name_length,
uint8_t const* end_key_name,
Expand Down

0 comments on commit 53d252f

Please sign in to comment.