Skip to content

Commit

Permalink
Removed all references to LevelDB - it has not been the recommended disk database format for a while now.
Browse files Browse the repository at this point in the history
  • Loading branch information
zerebubuth committed Feb 22, 2015
1 parent 12b014e commit 77c5707
Show file tree
Hide file tree
Showing 10 changed files with 7 additions and 155 deletions.
7 changes: 2 additions & 5 deletions README.md
Expand Up @@ -20,18 +20,15 @@ Before building the code, you will need:
* libxml2 (version 2.6.31 recommended),
* The Boost libraries (version 1.49 recommended),
* libosmpbf (version 1.3.0 recommended),
* leveldb (version 1.9.0 recommended),
* libprotobuf and libprotobuf-lite (version 2.4.1 recommended)
* libsnappy

To install these on Ubuntu, you can just type:

sudo apt-get install build-essential automake autoconf \
libxml2-dev libboost-dev libboost-program-options-dev \
libboost-date-time-dev libboost-filesystem-dev \
libboost-thread-dev libboost-iostreams-dev \
libosmpbf-dev osmpbf-bin libsnappy-dev \
libprotobuf-dev pkg-config
libosmpbf-dev osmpbf-bin libprotobuf-dev pkg-config

After that, it should just be a matter of running:

Expand All @@ -51,7 +48,7 @@ you can read by running:

planet-dump-ng --help

One thing to note is that the program will create LevelDB databases in
One thing to note is that the program will create on-disk databases in
the current working directory, so it is wise to run the program
somewhere with plenty of fast disk space. Existing files may interfere
with the operation of the program, so it's best to run it in its own,
Expand Down
14 changes: 0 additions & 14 deletions configure.ac
Expand Up @@ -26,20 +26,6 @@ AX_BOOST_DATE_TIME
AX_BOOST_THREAD
AX_BOOST_IOSTREAMS

AC_ARG_WITH(leveldb,
[AS_HELP_STRING([--with-leveldb=<path>], [Path containing LevelDB library (e.g: /usr), if you want to use it.])],
[LEVELDB_PREFIX=$with_leveldb])
AS_IF([test "x$LEVELDB_PREFIX" != x],
[AC_DEFINE([HAVE_LEVELDB], [1], [Define when LevelDB library is to be used.])],
[])
AM_CONDITIONAL([HAVE_LEVELDB], [test "x$LEVELDB_PREFIX" != x])

AC_SUBST(LEVELDB_PREFIX)
LEVELDB_LIBS="-L${LEVELDB_PREFIX}/lib -lleveldb -lsnappy"
LEVELDB_CFLAGS="-I${LEVELDB_PREFIX}/include"
AC_SUBST(LEVELDB_LIBS)
AC_SUBST(LEVELDB_CFLAGS)

PKG_CHECK_MODULES([PROTOBUF_LITE], "protobuf-lite")
AC_SUBST([PROTOBUF_LITE_CFLAGS])
AC_SUBST([PROTOBUF_LITE_LIBS])
Expand Down
2 changes: 1 addition & 1 deletion include/copy_elements.hpp
Expand Up @@ -7,7 +7,7 @@
#include <string>

/**
* Read the LevelDB database for users, and extract all the public data
* Read the disk database for users, and extract all the public data
* ones into a map of user ID to display name.
*/
void extract_users(std::map<int64_t, std::string> &display_name_map);
Expand Down
5 changes: 0 additions & 5 deletions include/insert_kv.hpp
Expand Up @@ -3,13 +3,8 @@

#include "config.h"

#ifdef HAVE_LEVELDB
#include <leveldb/slice.h>
typedef leveldb::Slice slice_t;
#else /* HAVE_LEVELDB */
#include <string>
typedef std::string slice_t;
#endif /* HAVE_LEVELDB */

template <typename T>
void insert_kv(T &t, const slice_t &key, const slice_t &val);
Expand Down
5 changes: 0 additions & 5 deletions src/Makefile.am
Expand Up @@ -3,11 +3,6 @@ LDADD=@LIBXML_LIBS@ @BOOST_FILESYSTEM_LIB@ @BOOST_PROGRAM_OPTIONS_LIB@ @BOOST_DA
AM_LDFLAGS=@BOOST_LDFLAGS@
AM_CPPFLAGS=-I../include @LIBXML_CFLAGS@ @BOOST_CPPFLAGS@ @PROTOBUF_LITE_CFLAGS@ @PROTOBUF_CFLAGS@

if HAVE_LEVELDB
LDADD+=@LEVELDB_LIBS@
AM_CPPFLAGS+=@LEVELDB_CFLAGS@
endif

bin_PROGRAMS=../planet-dump-ng
################################################################################
___planet_dump_ng_SOURCES=\
Expand Down
52 changes: 0 additions & 52 deletions src/copy_elements.cpp
Expand Up @@ -18,22 +18,15 @@
#include <boost/make_shared.hpp>
#include <boost/foreach.hpp>

#ifdef HAVE_LEVELDB
#include <leveldb/db.h>
#include <leveldb/options.h>
#else /* HAVE_LEVELDB */
#include <boost/filesystem.hpp>
#include <boost/iostreams/stream.hpp>
#include <boost/iostreams/filter/gzip.hpp>
#include <boost/iostreams/filtering_streambuf.hpp>
#include <boost/iostreams/operations.hpp>
#include <fstream>
#endif /* HAVE_LEVELDB */

#ifndef HAVE_LEVELDB
namespace bio = boost::iostreams;
namespace fs = boost::filesystem;
#endif /* HAVE_LEVELDB */

namespace {

Expand Down Expand Up @@ -78,50 +71,6 @@ struct thread_writer {
}
};

#ifdef HAVE_LEVELDB
// leveldb implementation of db_reader
template <typename T>
struct db_reader {
db_reader(const std::string &name) : m_db(NULL), m_itr(NULL) {
m_options.create_if_missing = false;
m_options.error_if_exists = false;

leveldb::Status status;
status = leveldb::DB::Open(m_options, name, &m_db);
if (!status.ok()) {
BOOST_THROW_EXCEPTION(std::runtime_error((boost::format("Can't open database: %1%") % status.ToString()).str()));
}

m_itr = m_db->NewIterator(m_read_options);
m_itr->SeekToFirst();
}

~db_reader() {
if (m_itr != NULL) {
delete m_itr;
}
if (m_db != NULL) {
delete m_db;
}
}

bool operator()(T &t) {
const bool valid = m_itr->Valid();
if (valid) {
leveldb::Slice key = m_itr->key();
leveldb::Slice val = m_itr->value();
insert_kv(t, key, val);
m_itr->Next();
}
return valid;
}

leveldb::DB *m_db;
leveldb::Iterator *m_itr;
leveldb::Options m_options;
leveldb::ReadOptions m_read_options;
};
#else /* HAVE_LEVELDB */
template <typename T>
struct db_reader {
explicit db_reader(const std::string &subdir) : m_end(false) {
Expand Down Expand Up @@ -169,7 +118,6 @@ struct db_reader {
std::ifstream m_file;
bio::filtering_streambuf<bio::input> m_stream;
};
#endif /* HAVE_LEVELDB */

template <>
struct db_reader<int> {
Expand Down
2 changes: 1 addition & 1 deletion src/dump_archive.cpp
Expand Up @@ -107,7 +107,7 @@ template <typename R>
bt::ptime run_thread<R>::join() {
thr.join();
if (error) {
boost::throw_exception(boost::enable_error_info(std::runtime_error("Error during archive dump to LevelDB."))
boost::throw_exception(boost::enable_error_info(std::runtime_error("Error during archive dump to disk database."))
<< boost::errinfo_nested_exception(error)
<< errinfo_table_name(table_name));
}
Expand Down
65 changes: 0 additions & 65 deletions src/dump_reader.cpp
Expand Up @@ -9,11 +9,6 @@
#include <boost/noncopyable.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>

#ifdef HAVE_LEVELDB
#include <leveldb/db.h>
#include <leveldb/options.h>
#include <leveldb/write_batch.h>
#else /* HAVE_LEVELDB */
#include <boost/filesystem.hpp>
#include <boost/iostreams/stream.hpp>
#include <boost/iostreams/filter/gzip.hpp>
Expand All @@ -23,7 +18,6 @@
#include <boost/make_shared.hpp>
#include <fstream>
//#include <fcntl.h>
#endif /* HAVE_LEVELDB */

#include <boost/spirit/include/qi.hpp>
#include <boost/foreach.hpp>
Expand All @@ -38,22 +32,17 @@
namespace {

namespace qi = boost::spirit::qi;
#ifndef HAVE_LEVELDB
namespace bio = boost::iostreams;
namespace fs = boost::filesystem;
#endif /* !HAVE_LEVELDB */

struct tag_copy_header;
struct tag_leveldb_status;

typedef boost::error_info<tag_copy_header, std::string> copy_header;
typedef boost::error_info<tag_leveldb_status, std::string> leveldb_status;

struct popen_error : public boost::exception, std::exception {};
struct fread_error : public boost::exception, std::exception {};
struct early_termination_error : public boost::exception, std::exception {};
struct copy_header_parse_error : public boost::exception, std::exception {};
struct leveldb_error : public boost::exception, std::exception {};

typedef boost::shared_ptr<FILE> pipe_ptr;

Expand Down Expand Up @@ -245,59 +234,6 @@ struct filter_copy_contents
std::string m_table_name;
};

#ifdef HAVE_LEVELDB
struct db_writer {
explicit db_writer(const std::string &table_name)
: m_db(NULL),
m_batch(),
m_batch_size(0),
m_write_options() {

leveldb::Options options;
options.create_if_missing = true;
options.error_if_exists = true;

// bigger write buffer, as this is a write-heavy process...
options.write_buffer_size = 128 * 1024 * 1024;

leveldb::Status status = leveldb::DB::Open(options, table_name, &m_db);
if (!status.ok()) {
BOOST_THROW_EXCEPTION(leveldb_error() << leveldb_status(status.ToString()));
}
}

~db_writer() {
delete m_db;
}

void finish() {
if (m_batch_size > 0) {
m_db->Write(m_write_options, &m_batch);
m_batch.Clear();
m_batch_size = 0;
}
m_db->CompactRange(NULL, NULL);
}

void put(const std::string &k, const std::string &v) {
m_batch.Put(k, v);
++m_batch_size;

if (m_batch_size >= BATCH_SIZE) {
m_db->Write(m_write_options, &m_batch);
m_batch.Clear();
m_batch_size = 0;
}
}

leveldb::DB *m_db;
leveldb::WriteBatch m_batch;
size_t m_batch_size;
leveldb::WriteOptions m_write_options;
};

#else /* HAVE_LEVELDB */

typedef std::pair<std::string, std::string> kv_pair_t;

struct block_reader : public boost::noncopyable {
Expand Down Expand Up @@ -607,7 +543,6 @@ struct db_writer : public boost::noncopyable {
if (tcb.m_error) { boost::rethrow_exception(tcb.m_error); }
}
};
#endif /* HAVE_LEVELDB */

} // anonymous namespace

Expand Down
4 changes: 0 additions & 4 deletions src/insert_kv.cpp
Expand Up @@ -139,11 +139,7 @@ struct unapp_item {

template <typename T>
void from_binary(const slice_t &s, T &t) {
#ifdef HAVE_LEVELDB
std::istringstream in(s.ToString());
#else /* HAVE_LEVELDB */
std::istringstream in(s);
#endif /* HAVE_LEVELDB */
bf::fold(t, 0, unapp_item(in));
}

Expand Down
6 changes: 3 additions & 3 deletions src/planet-dump.cpp
Expand Up @@ -69,12 +69,12 @@ static void get_options(int argc, char **argv, po::variables_map &vm) {
}

/**
* read the dump file in parallel to get all of the elements into leveldb
* read the dump file in parallel to get all of the elements into on-disk
* databases. this is primarily so that the data is sorted, which is not
* guaranteed in the PostgreSQL dump file. returns the maximum time seen
* in a timestamp of any element in the dump file.
*/
bt::ptime setup_leveldb_databases(const std::string &dump_file) {
bt::ptime setup_databases(const std::string &dump_file) {
std::list<boost::shared_ptr<base_thread> > threads;

threads.push_back(boost::make_shared<run_thread<changeset> >("changesets", dump_file));
Expand Down Expand Up @@ -113,7 +113,7 @@ int main(int argc, char *argv[]) {
// extract data from the dump file for the "sorted" data tables, like nodes,
// ways, relations, changesets and their associated tags, etc...
const std::string dump_file(options["dump-file"].as<std::string>());
const bt::ptime max_time = setup_leveldb_databases(dump_file);
const bt::ptime max_time = setup_databases(dump_file);

// users aren't dumped directly to the files. we only use them to build up a map
// of uid -> name where a missing uid indicates that the user doesn't have public
Expand Down

0 comments on commit 77c5707

Please sign in to comment.