Skip to content

Commit

Permalink
PagedResults review, basic fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
aaime committed Nov 2, 2017
1 parent 24f147d commit adacc46
Show file tree
Hide file tree
Showing 11 changed files with 131 additions and 173 deletions.
4 changes: 2 additions & 2 deletions doc/en/user/source/community/nsg-profile/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ This means that this type of navigation will always be sequential, if the client
#. request page four and use the provided next URI to retrieve page five

This is not an ideal solution for accessing random pages, which is a common action.
PageResults operation will improve this by allowing clients to request random pages directly.
The PageResults operation adds a standard way to request random pages directly.

Installing the extension
------------------------
Expand Down Expand Up @@ -156,7 +156,7 @@ The available parameters are the following:
:header-rows: 1

* - Name
- Mandotry
- Mandatory
- Default Value
* - service
- YES
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,44 +5,42 @@

package org.geoserver.nsg.pagination.random;

import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.geoserver.platform.resource.Resource;
import org.geotools.data.DataStore;

import java.util.Map;
import java.util.concurrent.TimeUnit;

/**
*
* Class used to store the index result type configuration managed by {@link IndexInitializer}
*
*
* @author sandr
*
*/
public class IndexConfiguration {

private DataStore currentDataStore;

private Resource storageResource;

private Long timeToLiveInSec = 600l;
private Long timeToLiveInSec = 600L;

private Map<String, Object> currentDataStoreParams;

/**
* Store the DB parameters and the relative {@link DataStore}
*
*
* @param currentDataStoreParams
* @param currentDataStore
*/
public void setCurrentDataStore(Map<String, Object> currentDataStoreParams,
DataStore currentDataStore) {
DataStore currentDataStore) {
this.currentDataStoreParams = currentDataStoreParams;
this.currentDataStore = currentDataStore;
}

/**
* Store the reference to resource used to archive the serialized GetFeatureRequest
*
*
* @param storageResource
*/
public void setStorageResource(Resource storageResource) {
Expand All @@ -51,7 +49,7 @@ public void setStorageResource(Resource storageResource) {

/**
* Store the value of time to live of stored GetFeatureRequest
*
*
* @param timeToLive
* @param timeUnit
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,20 +5,6 @@

package org.geoserver.nsg.pagination.random;

import java.io.File;
import java.io.InputStream;
import java.io.OutputStream;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.geoserver.config.GeoServer;
import org.geoserver.config.GeoServerDataDirectory;
import org.geoserver.config.GeoServerInitializer;
Expand All @@ -44,8 +30,21 @@
import org.opengis.feature.simple.SimpleFeatureType;
import org.opengis.filter.Filter;

import java.io.File;
import java.io.InputStream;
import java.io.OutputStream;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
*
* Class used to parse the configuration properties stored in <b>nsg-profile</b> module folder:
* <ul>
* <li><b>resultSets.storage.path</b> path where to store the serialized GetFeatureRequest with name
Expand All @@ -69,13 +68,12 @@
* path content should be moved to the new one,
* <li>When the time to live is changed the {@link #clean()} procedure will update.
* </ul>
*
* <p>
* The class is also responsible to {@link #clean()} the stored requests (result sets) that have not
* been used for a period of time bigger than the configured time to live value
* <p>
*
* @author sandr
*
* @author sandr
*/

public final class IndexInitializer implements GeoServerInitializer {
Expand All @@ -88,58 +86,52 @@ public final class IndexInitializer implements GeoServerInitializer {

static final String MODULE_DIR = "nsg-profile";

public static final String STORE_SCHEMA_NAME = "RESULT_SET";
static final String STORE_SCHEMA_NAME = "RESULT_SET";

public static final String STORE_SCHEMA = "ID:java.lang.String,created:java.lang.Long,updated:java.lang.Long";
static final String STORE_SCHEMA = "ID:java.lang.String,created:java.lang.Long,updated:java.lang.Long";

private IndexConfiguration indexConfiguration;

/*
* Lock to synchronize activity of clean task with listener that changes the DB and file
* resources
*/
protected static final ReadWriteLock READ_WRITE_LOCK = new ReentrantReadWriteLock();
static final ReadWriteLock READ_WRITE_LOCK = new ReentrantReadWriteLock();

@Override
public void initialize(GeoServer geoServer) throws Exception {
indexConfiguration = GeoServerExtensions.bean(IndexConfiguration.class);
GeoServerResourceLoader loader = GeoServerExtensions.bean(GeoServerResourceLoader.class);
GeoServerDataDirectory dd = new GeoServerDataDirectory(loader);
Resource resource = dd.get(MODULE_DIR + "/" + PROPERTY_FILENAME);
if (loader != null) {
File directory = loader.findOrCreateDirectory(MODULE_DIR);
File file = new File(directory, PROPERTY_FILENAME);
// Create default configuration file
if (!file.exists()) {
InputStream stream = IndexInitializer.class
.getResourceAsStream("/" + PROPERTY_FILENAME);
Properties properties = new Properties();
if (resource.getType() == Resource.Type.UNDEFINED) {
Properties properties = new Properties();
try (InputStream stream = IndexInitializer.class
.getResourceAsStream("/" + PROPERTY_FILENAME)) {
properties.load(stream);
// Replace GEOSERVER_DATA_DIR placeholder
properties.replaceAll((k, v) -> ((String) v).replace("${GEOSERVER_DATA_DIR}",
dd.root().getPath()));
stream.close();
// Create resource and save properties
}
// Create resource and save properties
try (OutputStream out = resource.out()) {
properties.store(out, null);
out.close();
} catch (Exception exception) {
throw new RuntimeException("Error initializing paged results configurations.", exception);
}
}
loadConfigurations(resource);
// Listen for changes in configuration file and reload properties
resource.addListener(notify -> {
if (notify.getKind() == Kind.ENTRY_MODIFY) {
try {
OutputStream out = resource.out();
properties.store(out, null);
out.close();
loadConfigurations(resource);
} catch (Exception exception) {
throw new RuntimeException("Error to initialize configurations.", exception);
throw new RuntimeException("Error reload configurations.", exception);
}
}
loadConfigurations(resource);
// Listen for changes in configuration file and reload properties
resource.addListener(notify -> {
if (notify.getKind() == Kind.ENTRY_MODIFY) {
try {
loadConfigurations(resource);
} catch (Exception exception) {
throw new RuntimeException("Error reload configurations.", exception);
}
}
});
}
});
}

/**
Expand Down Expand Up @@ -168,7 +160,7 @@ private void loadConfigurations(Resource resource) throws Exception {
properties.get(PROPERTY_DB_PREFIX + JDBCDataStoreFactory.USER.key));
params.put(JDBCDataStoreFactory.PASSWD.key,
properties.get(PROPERTY_DB_PREFIX + JDBCDataStoreFactory.PASSWD.key));
/**
/*
* When the index DB is changed the new DB should be used and the content of the old
* table moved to the new table. If the new DB already has the index table it should be
* emptied
Expand All @@ -178,7 +170,7 @@ private void loadConfigurations(Resource resource) throws Exception {
* If the storage path is changed, the new storage path should be used and the old
* storage path content should be moved to the new one
*/
manageStorageChange(resource, properties.get("resultSets.storage.path"));
manageStorageChange(properties.get("resultSets.storage.path"));
/*
* Change time to live
*/
Expand Down Expand Up @@ -208,7 +200,7 @@ private void manageTimeToLiveChange(Object timneToLive) {
* Helper method that moves resource files from the current folder to the new one; the current
* storage is deleted
*/
private void manageStorageChange(Resource resource, Object newStorage) {
private void manageStorageChange(Object newStorage) {
try {
if (newStorage != null) {
String newStorageStr = (String) newStorage;
Expand Down Expand Up @@ -236,15 +228,15 @@ private void manageDBChange(Map<String, Object> params) {
// New database is valid and is different from current one
if (newDataStore != null && !isDBTheSame(params)) {
// Create table in new database
createTable(newDataStore, true);
createFeatureType(newDataStore, true);
// Move data to new database
moveData(exDataStore, newDataStore);
// Dispose old database
exDataStore.dispose();
}
} else {
// Create schema
createTable(newDataStore, false);
createFeatureType(newDataStore, false);
}
indexConfiguration.setCurrentDataStore(params, newDataStore);
} catch (Exception exception) {
Expand All @@ -260,44 +252,44 @@ private Boolean isDBTheSame(Map<String, Object> newParams) {
boolean isTheSame = (currentParams.get(JDBCDataStoreFactory.DBTYPE.key) == null
&& newParams.get(JDBCDataStoreFactory.DBTYPE.key) == null)
|| (currentParams.get(JDBCDataStoreFactory.DBTYPE.key) != null
&& newParams.get(JDBCDataStoreFactory.DBTYPE.key) != null
&& currentParams.get(JDBCDataStoreFactory.DBTYPE.key)
.equals(newParams.get(JDBCDataStoreFactory.DBTYPE.key)));
&& newParams.get(JDBCDataStoreFactory.DBTYPE.key) != null
&& currentParams.get(JDBCDataStoreFactory.DBTYPE.key)
.equals(newParams.get(JDBCDataStoreFactory.DBTYPE.key)));
isTheSame = isTheSame
&& (currentParams.get(JDBCDataStoreFactory.DATABASE.key) == null
&& newParams.get(JDBCDataStoreFactory.DATABASE.key) == null)
&& newParams.get(JDBCDataStoreFactory.DATABASE.key) == null)
|| (currentParams.get(JDBCDataStoreFactory.DATABASE.key) != null
&& newParams.get(JDBCDataStoreFactory.DATABASE.key) != null
&& currentParams.get(JDBCDataStoreFactory.DATABASE.key)
.equals(newParams.get(JDBCDataStoreFactory.DATABASE.key)));
&& newParams.get(JDBCDataStoreFactory.DATABASE.key) != null
&& currentParams.get(JDBCDataStoreFactory.DATABASE.key)
.equals(newParams.get(JDBCDataStoreFactory.DATABASE.key)));
isTheSame = isTheSame
&& (currentParams.get(JDBCDataStoreFactory.HOST.key) == null
&& newParams.get(JDBCDataStoreFactory.HOST.key) == null)
&& newParams.get(JDBCDataStoreFactory.HOST.key) == null)
|| (currentParams.get(JDBCDataStoreFactory.HOST.key) != null
&& newParams.get(JDBCDataStoreFactory.HOST.key) != null
&& currentParams.get(JDBCDataStoreFactory.HOST.key)
.equals(newParams.get(JDBCDataStoreFactory.HOST.key)));
&& newParams.get(JDBCDataStoreFactory.HOST.key) != null
&& currentParams.get(JDBCDataStoreFactory.HOST.key)
.equals(newParams.get(JDBCDataStoreFactory.HOST.key)));
isTheSame = isTheSame
&& (currentParams.get(JDBCDataStoreFactory.PORT.key) == null
&& newParams.get(JDBCDataStoreFactory.PORT.key) == null)
&& newParams.get(JDBCDataStoreFactory.PORT.key) == null)
|| (currentParams.get(JDBCDataStoreFactory.PORT.key) != null
&& newParams.get(JDBCDataStoreFactory.PORT.key) != null
&& currentParams.get(JDBCDataStoreFactory.PORT.key)
.equals(newParams.get(JDBCDataStoreFactory.PORT.key)));
&& newParams.get(JDBCDataStoreFactory.PORT.key) != null
&& currentParams.get(JDBCDataStoreFactory.PORT.key)
.equals(newParams.get(JDBCDataStoreFactory.PORT.key)));
isTheSame = isTheSame
&& (currentParams.get(JDBCDataStoreFactory.SCHEMA.key) == null
&& newParams.get(JDBCDataStoreFactory.SCHEMA.key) == null)
&& newParams.get(JDBCDataStoreFactory.SCHEMA.key) == null)
|| (currentParams.get(JDBCDataStoreFactory.SCHEMA.key) != null
&& newParams.get(JDBCDataStoreFactory.SCHEMA.key) != null
&& currentParams.get(JDBCDataStoreFactory.SCHEMA.key)
.equals(newParams.get(JDBCDataStoreFactory.SCHEMA.key)));
&& newParams.get(JDBCDataStoreFactory.SCHEMA.key) != null
&& currentParams.get(JDBCDataStoreFactory.SCHEMA.key)
.equals(newParams.get(JDBCDataStoreFactory.SCHEMA.key)));
return isTheSame;
}

/**
* Helper method that creates a new feature type (table) in the DB to store resource information
*/
private void createTable(DataStore dataStore, boolean forceDelete) throws Exception {
private void createFeatureType(DataStore dataStore, boolean forceDelete) throws Exception {
boolean exists = dataStore.getNames().contains(new NameImpl(STORE_SCHEMA_NAME));
// Schema exists
if (exists) {
Expand Down Expand Up @@ -358,15 +350,12 @@ public void clean() throws Exception {
SimpleFeatureCollection toRemoved = store.getFeatures(filter);
// Remove file
Resource currentResource = indexConfiguration.getStorageResource();
SimpleFeatureIterator iterator = toRemoved.features();
try {
try (SimpleFeatureIterator iterator = toRemoved.features()) {
while (iterator.hasNext()) {
SimpleFeature feature = iterator.next();
currentResource.get(feature.getID()).delete();
featureRemoved++;
}
} finally {
iterator.close();
}
store.removeFeatures(filter);
}
Expand All @@ -375,12 +364,12 @@ public void clean() throws Exception {
LOGGER.finest("CLEAN executed, removed " + featureRemoved
+ " stored requests older than "
+ new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
.format(new Date(liveTreshold)));
.format(new Date(liveTreshold)));
}
}
} catch (Throwable t) {
session.rollback();
LOGGER.warning("Error on clean data");
LOGGER.log(Level.WARNING, "Error on clean data", t);
} finally {
session.close();
IndexInitializer.READ_WRITE_LOCK.writeLock().unlock();
Expand Down

0 comments on commit adacc46

Please sign in to comment.