@@ -114,8 +114,8 @@ Let's see how to cache options to file when we tune a training layer.
     out = convolution(I, W)
     out[0].sum().backward()

-You will find two cache files created: :code:`convolution_train.cuda/options` has
-options for the forward layer and :code:`convolution_train_backward.cuda/options` file
+You will find two cache files created: the :code:`convolution_train.options` file has
+options for the forward layer and the :code:`convolution_train_backward.options` file
 has options for the grad layer.

 Reordering grad outputs
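(A minimal end-to-end sketch of the training-layer flow documented above, assuming the tensor_comprehensions Python API used in test_python/layers/test_autotuner.py. The TC definitions and the training/backward keyword arguments are reconstructed for illustration; only the cache file names come from this diff.)

    import torch
    from torch.nn.parameter import Parameter
    import tensor_comprehensions as tc

    # Forward and gradient TCs for a small convolution (illustrative only).
    lang = """
    def convolution(float(N, C, H, W) I, float(M, C, KH, KW) W1) -> (O) {
        O(n, m, h, w) +=! I(n, c, h + kh, w + kw) * W1(m, c, kh, kw)
    }
    def convolution_grad(float(N, C, H, W) I, float(M, C, KH, KW) W1, float(N, M, H, W) O_grad) -> (I_grad, W1_grad) {
        I_grad(n, c, h, w) +=! O_grad(n, m, h - kh, w - kw) * W1(m, c, kh, kw)
        W1_grad(m, c, kh, kw) +=! O_grad(n, m, h - kh, w - kw) * I(n, c, h, w)
    }
    """

    # Assumed API: training=True registers the backward TC with autograd.
    convolution = tc.define(
        lang, training=True, name="convolution", backward="convolution_grad")
    I = Parameter(torch.randn(32, 4, 56, 56).cuda())
    W = Parameter(torch.randn(16, 4, 3, 3).cuda())

    # Tuning with cache="convolution_train" now leaves exactly two files:
    # convolution_train.options for the forward kernel and
    # convolution_train_backward.options for the grad kernel (no .cuda files).
    convolution.autotune(I, W, cache="convolution_train")

    out = convolution(I, W)
    out[0].sum().backward()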
@@ -219,7 +219,7 @@ For example:
 tc.decode
 ---------

-When you save the autotuner cache, two files are created ending in :code:`.cuda/.options`.
+When you save the autotuner cache, one file is created ending in :code:`.options`.
 The :code:`.options` file contains the encoded kernel options. If you are curious
 about what those options look like, you can decode the options by calling :code:`tc.decode`

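(A hedged sketch of inspecting a saved cache with tc.decode, per the docs above. The exact signature of tc.decode is not shown in this diff; passing the raw encoded bytes is an assumption, so check the Python binding.)

    import tensor_comprehensions as tc

    # After this change the autotuner writes a single protobuf per kernel,
    # e.g. matmul_100_400_500.options next to the chosen cache prefix.
    with open("matmul_100_400_500.options", "rb") as f:
        encoded = f.read()

    # Assumption: tc.decode takes the encoded options and returns a
    # human-readable form of the mapping options.
    print(tc.decode(encoded))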
7 changes: 3 additions & 4 deletions tc/autotuner/genetic_autotuner.cc
@@ -53,8 +53,7 @@ void GeneticAutotuner::storeCaches(const std::string& filename) {
   if (filename.empty()) {
     std::cout << "No filepath provided, not saving cache" << std::endl;
   } else {
-    std::cout << "Dumping cache to " << filename << ".cuda/options"
-              << std::endl;
+    std::cout << "Dumping cache to " << filename << ".options" << std::endl;
     tc::OptionsCache::getCache()->keepOnlyBestCandidates(
         tc::FLAGS_tuner_save_best_candidates_count);
     tc::OptionsCache::dumpCacheToProtobuf(tc::makeOptionsFilename(filename));
@@ -69,7 +68,7 @@ std::vector<CudaMappingOptions> GeneticAutotuner::load(
     const std::vector<const DLTensor*>& inputs,
     const size_t numCandidates) {
   std::cout << "Loading proto from: " << tc::makeOptionsFilename(cacheFileName)
-            << " and " << tc::makeCudaFilename(cacheFileName) << std::endl;
+            << std::endl;
   enableOrLoadCache(cacheFileName);
   tc::FLAGS_tuner_gen_restore_number =
       std::min(numCandidates, size_t(FLAGS_tuner_gen_pop_size) - 1);
@@ -141,7 +140,7 @@ llvm::Optional<CudaMappingOptions> GeneticAutotuner::tune(
     tuner.run(FLAGS_tuner_gen_generations);
   } catch (const std::exception& e) {
     std::cerr << "Exception during autotuning: " << e.what()
-              << "\n dumping cache to " << cacheFileName << ".cuda/options"
+              << "\n dumping cache to " << cacheFileName << ".options"
               << std::endl;
     storeCaches(cacheFileName);
     tunerThreadEx = std::current_exception();
3 changes: 1 addition & 2 deletions tc/benchmarks/benchmark_fixture.h
@@ -255,8 +255,7 @@ struct Benchmark : public ::testing::Test {
         return true;
       }) {
     std::cout << "Validating proto from: "
-              << tc::makeOptionsFilename(cacheFilename) << "and "
-              << tc::makeCudaFilename(cacheFilename) << std::endl;
+              << tc::makeOptionsFilename(cacheFilename) << std::endl;

     tc::OptionsCache::enableCache();
     tc::OptionsCache::loadCacheFromProtobuf(cacheFilename + ".options");
3 changes: 0 additions & 3 deletions tc/core/compilation_cache.h
@@ -101,7 +101,4 @@ inline std::string makeOptionsFilename(const std::string& filename) {
   return filename + ".options";
 }

-inline std::string makeCudaFilename(const std::string& filename) {
-  return filename + ".cuda";
-}
 } // namespace tc
2 changes: 1 addition & 1 deletion test_python/layers/test_autotuner.py
@@ -105,7 +105,7 @@ def test_autotuner_cachefile_first(self):
     def test_autotuner_cachefile_load(self):
         lang = MATMUL_LANG
         cache_file = "{}/matmul_100_400_500".format(PATH_PREFIX)  # use argparse if input from command line
-        assert os.path.isfile("{}.cuda".format(cache_file)), "looks like the cache_file doesn't exist"
+        assert os.path.isfile("{}.options".format(cache_file)), "looks like the cache_file doesn't exist"

         matmul = tc.define(lang, name="matmul")
         mat1, mat2 = torch.randn(100, 400).cuda(), torch.randn(400, 500).cuda()