
Merge branch 'DSpace:main' into synced
Ma-Tador committed May 3, 2023
2 parents 603cd9a + 7cebc65 commit 37e93e2
Showing 65 changed files with 1,835 additions and 324 deletions.
1 change: 0 additions & 1 deletion .dockerignore
@@ -6,6 +6,5 @@ dspace/modules/*/target/
Dockerfile.*
dspace/src/main/docker/dspace-postgres-pgcrypto
dspace/src/main/docker/dspace-postgres-pgcrypto-curl
-dspace/src/main/docker/solr
dspace/src/main/docker/README.md
dspace/src/main/docker-compose/
37 changes: 35 additions & 2 deletions .github/workflows/build.yml
@@ -79,6 +79,39 @@ jobs:
name: ${{ matrix.type }} results
path: ${{ matrix.resultsdir }}

-# https://github.com/codecov/codecov-action
# Upload code coverage report to artifact, so that it can be shared with the 'codecov' job (see below)
- name: Upload code coverage report to Artifact
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.type }} coverage report
path: 'dspace/target/site/jacoco-aggregate/jacoco.xml'
retention-days: 14

# Codecov upload is a separate job in order to allow us to restart it separately from the entire
# build/test job above. This is necessary because Codecov uploads seem to randomly fail at times.
# See https://community.codecov.com/t/upload-issues-unable-to-locate-build-via-github-actions-api/3954
codecov:
# Must run after 'tests' job above
needs: tests
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3

# Download artifacts from previous 'tests' job
- name: Download coverage artifacts
uses: actions/download-artifact@v3
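# NOTE (editor's annotation): no 'name' filter is given above, so download-artifact@v3
# fetches every artifact from this run, i.e. each '<type> coverage report' uploaded
# by the matrix 'tests' job.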

# Now attempt upload to Codecov using its action.
# NOTE: We use a retry action to retry the Codecov upload if it fails the first time.
#
# Retry action: https://github.com/marketplace/actions/retry-action
# Codecov action: https://github.com/codecov/codecov-action
- name: Upload coverage to Codecov.io
-uses: codecov/codecov-action@v3
+uses: Wandalen/wretry.action@v1.0.36
with:
action: codecov/codecov-action@v3
# Try upload 5 times max
attempt_limit: 5
# Run again in 30 seconds
attempt_delay: 30000
83 changes: 83 additions & 0 deletions .github/workflows/docker.yml
@@ -170,3 +170,86 @@ jobs:
# Use tags / labels provided by 'docker/metadata-action' above
tags: ${{ steps.meta_build_cli.outputs.tags }}
labels: ${{ steps.meta_build_cli.outputs.labels }}

###########################################
# Build/Push the 'dspace/dspace-solr' image
###########################################
# Get Metadata for docker_build_solr step below
- name: Sync metadata (tags, labels) from GitHub to Docker for 'dspace-solr' image
id: meta_build_solr
uses: docker/metadata-action@v4
with:
images: dspace/dspace-solr
tags: ${{ env.IMAGE_TAGS }}
flavor: ${{ env.TAGS_FLAVOR }}

- name: Build and push 'dspace-solr' image
id: docker_build_solr
uses: docker/build-push-action@v3
with:
context: .
file: ./dspace/src/main/docker/dspace-solr/Dockerfile
platforms: ${{ env.PLATFORMS }}
# For pull requests, we run the Docker build (to ensure no PR changes break the build),
# but we ONLY do an image push to DockerHub if it's NOT a PR
push: ${{ github.event_name != 'pull_request' }}
# Use tags / labels provided by 'docker/metadata-action' above
tags: ${{ steps.meta_build_solr.outputs.tags }}
labels: ${{ steps.meta_build_solr.outputs.labels }}

###########################################################
# Build/Push the 'dspace/dspace-postgres-pgcrypto' image
###########################################################
# Get Metadata for docker_build_postgres step below
- name: Sync metadata (tags, labels) from GitHub to Docker for 'dspace-postgres-pgcrypto' image
id: meta_build_postgres
uses: docker/metadata-action@v4
with:
images: dspace/dspace-postgres-pgcrypto
tags: ${{ env.IMAGE_TAGS }}
flavor: ${{ env.TAGS_FLAVOR }}

- name: Build and push 'dspace-postgres-pgcrypto' image
id: docker_build_postgres
uses: docker/build-push-action@v3
with:
# Must build out of subdirectory to have access to install script for pgcrypto
context: ./dspace/src/main/docker/dspace-postgres-pgcrypto/
dockerfile: Dockerfile
platforms: ${{ env.PLATFORMS }}
# For pull requests, we run the Docker build (to ensure no PR changes break the build),
# but we ONLY do an image push to DockerHub if it's NOT a PR
push: ${{ github.event_name != 'pull_request' }}
# Use tags / labels provided by 'docker/metadata-action' above
tags: ${{ steps.meta_build_postgres.outputs.tags }}
labels: ${{ steps.meta_build_postgres.outputs.labels }}

###########################################################
# Build/Push the 'dspace/dspace-postgres-pgcrypto' image ('-loadsql' tag)
###########################################################
# Get Metadata for docker_build_postgres_loadsql step below
- name: Sync metadata (tags, labels) from GitHub to Docker for 'dspace-postgres-pgcrypto-loadsql' image
id: meta_build_postgres_loadsql
uses: docker/metadata-action@v4
with:
images: dspace/dspace-postgres-pgcrypto
tags: ${{ env.IMAGE_TAGS }}
# Suffix all tags with "-loadsql". Otherwise, it uses the same
# tagging logic as the primary 'dspace/dspace-postgres-pgcrypto' image above.
flavor: ${{ env.TAGS_FLAVOR }}
  suffix=-loadsql
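# (Illustration, not part of the workflow: a build of the 'dspace-7_x' branch would
# push tags like 'dspace/dspace-postgres-pgcrypto:dspace-7_x-loadsql'.)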

- name: Build and push 'dspace-postgres-pgcrypto-loadsql' image
id: docker_build_postgres_loadsql
uses: docker/build-push-action@v3
with:
# Must build out of subdirectory to have access to install script for pgcrypto
context: ./dspace/src/main/docker/dspace-postgres-pgcrypto-curl/
dockerfile: Dockerfile
platforms: ${{ env.PLATFORMS }}
# For pull requests, we run the Docker build (to ensure no PR changes break the build),
# but we ONLY do an image push to DockerHub if it's NOT a PR
push: ${{ github.event_name != 'pull_request' }}
# Use tags / labels provided by 'docker/metadata-action' above
tags: ${{ steps.meta_build_postgres_loadsql.outputs.tags }}
labels: ${{ steps.meta_build_postgres_loadsql.outputs.labels }}
2 changes: 1 addition & 1 deletion .github/workflows/issue_opened.yml
@@ -16,7 +16,7 @@ jobs:
# Only add to project board if issue is flagged as "needs triage" or has no labels
# NOTE: By default we flag new issues as "needs triage" in our issue template
if: (contains(github.event.issue.labels.*.name, 'needs triage') || join(github.event.issue.labels.*.name) == '')
-uses: actions/add-to-project@v0.3.0
+uses: actions/add-to-project@v0.5.0
# Note, the authentication token below is an ORG level Secret.
# It must be created/recreated manually via a personal access token with admin:org, project, public_repo permissions
# See: https://docs.github.com/en/actions/configuring-and-managing-workflows/authenticating-with-the-github_token#permissions-for-the-github_token
2 changes: 1 addition & 1 deletion .github/workflows/label_merge_conflicts.yml
@@ -23,7 +23,7 @@ jobs:
steps:
# See: https://github.com/prince-chrismc/label-merge-conflicts-action
- name: Auto-label PRs with merge conflicts
-uses: prince-chrismc/label-merge-conflicts-action@v2
+uses: prince-chrismc/label-merge-conflicts-action@v3
# Add "merge conflict" label if a merge conflict is detected. Remove it when resolved.
# Note, the authentication token is created automatically
# See: https://docs.github.com/en/actions/configuring-and-managing-workflows/authenticating-with-the-github_token
2 changes: 1 addition & 1 deletion Dockerfile
@@ -31,7 +31,7 @@ ARG TARGET_DIR=dspace-installer
COPY --from=build /install /dspace-src
WORKDIR /dspace-src
# Create the initial install deployment using ANT
-ENV ANT_VERSION 1.10.12
+ENV ANT_VERSION 1.10.13
ENV ANT_HOME /tmp/ant-$ANT_VERSION
ENV PATH $ANT_HOME/bin:$PATH
# Need wget to install ant
2 changes: 1 addition & 1 deletion Dockerfile.cli
@@ -30,7 +30,7 @@ ARG TARGET_DIR=dspace-installer
COPY --from=build /install /dspace-src
WORKDIR /dspace-src
# Create the initial install deployment using ANT
-ENV ANT_VERSION 1.10.12
+ENV ANT_VERSION 1.10.13
ENV ANT_HOME /tmp/ant-$ANT_VERSION
ENV PATH $ANT_HOME/bin:$PATH
# Need wget to install ant
38 changes: 21 additions & 17 deletions docker-compose.yml
@@ -62,13 +62,17 @@ services:
while (!</dev/tcp/dspacedb/5432) > /dev/null 2>&1; do sleep 1; done;
/dspace/bin/dspace database migrate
catalina.sh run
-  # DSpace database container
+  # DSpace PostgreSQL database container
  dspacedb:
    container_name: dspacedb
+    # Uses a custom Postgres image with pgcrypto installed
+    image: "${DOCKER_OWNER:-dspace}/dspace-postgres-pgcrypto:${DSPACE_VER:-dspace-7_x}"
+    build:
+      # Must build out of subdirectory to have access to install script for pgcrypto
+      context: ./dspace/src/main/docker/dspace-postgres-pgcrypto/
    environment:
      PGDATA: /pgdata
-    # Uses a custom Postgres image with pgcrypto installed
-    image: dspace/dspace-postgres-pgcrypto
+      POSTGRES_PASSWORD: dspace
    networks:
      dspacenet:
    ports:
@@ -77,12 +81,17 @@
    stdin_open: true
    tty: true
    volumes:
      # Keep Postgres data directory between reboots
      - pgdata:/pgdata
  # DSpace Solr container
  dspacesolr:
    container_name: dspacesolr
    # Uses official Solr image at https://hub.docker.com/_/solr/
-    image: solr:8.11-slim
+    image: "${DOCKER_OWNER:-dspace}/dspace-solr:${DSPACE_VER:-dspace-7_x}"
+    build:
+      context: .
+      dockerfile: ./dspace/src/main/docker/dspace-solr/Dockerfile
+      args:
+        SOLR_VERSION: "${SOLR_VER:-8.11}"
    networks:
      dspacenet:
    ports:
@@ -92,30 +101,25 @@
    tty: true
    working_dir: /var/solr/data
    volumes:
-      # Mount our local Solr core configs so that they are available as Solr configsets on container
-      - ./dspace/solr/authority:/opt/solr/server/solr/configsets/authority
-      - ./dspace/solr/oai:/opt/solr/server/solr/configsets/oai
-      - ./dspace/solr/search:/opt/solr/server/solr/configsets/search
-      - ./dspace/solr/statistics:/opt/solr/server/solr/configsets/statistics
      # Keep Solr data directory between reboots
      - solr_data:/var/solr/data
-    # Initialize all DSpace Solr cores using the mounted local configsets (see above), then start Solr
+    # Initialize all DSpace Solr cores then start Solr:
    # * First, run precreate-core to create the core (if it doesn't yet exist). If exists already, this is a no-op
-    # * Second, copy updated configs from mounted configsets to this core. If it already existed, this updates core
-    #   to the latest configs. If it's a newly created core, this is a no-op.
+    # * Second, copy configsets to this core:
+    #   Updates to Solr configs require the container to be rebuilt/restarted: `docker compose -p d7 up -d --build dspacesolr`
    entrypoint:
      - /bin/bash
      - '-c'
      - |
        init-var-solr
        precreate-core authority /opt/solr/server/solr/configsets/authority
-        cp -r -u /opt/solr/server/solr/configsets/authority/* authority
+        cp -r /opt/solr/server/solr/configsets/authority/* authority
        precreate-core oai /opt/solr/server/solr/configsets/oai
-        cp -r -u /opt/solr/server/solr/configsets/oai/* oai
+        cp -r /opt/solr/server/solr/configsets/oai/* oai
        precreate-core search /opt/solr/server/solr/configsets/search
-        cp -r -u /opt/solr/server/solr/configsets/search/* search
+        cp -r /opt/solr/server/solr/configsets/search/* search
        precreate-core statistics /opt/solr/server/solr/configsets/statistics
-        cp -r -u /opt/solr/server/solr/configsets/statistics/* statistics
+        cp -r /opt/solr/server/solr/configsets/statistics/* statistics
        exec solr -f
volumes:
  assetstore:
2 changes: 1 addition & 1 deletion dspace-api/pom.xml
@@ -776,7 +776,7 @@
<dependency>
<groupId>org.json</groupId>
<artifactId>json</artifactId>
-<version>20180130</version>
+<version>20230227</version>
</dependency>
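<!-- NOTE (editor's annotation): 20230227 is the first org.json release to include the
     fix for CVE-2022-45688, which is the likely motivation for this version bump. -->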

<!-- Useful for testing command-line tools -->
18 changes: 13 additions & 5 deletions dspace-api/src/main/java/org/dspace/app/util/SyndicationFeed.java
@@ -51,6 +51,7 @@
import org.dspace.content.service.CommunityService;
import org.dspace.content.service.ItemService;
import org.dspace.core.Context;
+import org.dspace.core.I18nUtil;
import org.dspace.discovery.IndexableObject;
import org.dspace.discovery.indexobject.IndexableCollection;
import org.dspace.discovery.indexobject.IndexableCommunity;
@@ -91,6 +92,7 @@ public class SyndicationFeed {

// default DC fields for entry
protected String defaultTitleField = "dc.title";
+protected String defaultDescriptionField = "dc.description";
protected String defaultAuthorField = "dc.contributor.author";
protected String defaultDateField = "dc.date.issued";
private static final String[] defaultDescriptionFields =
@@ -196,15 +198,15 @@ public void populate(HttpServletRequest request, Context context, IndexableObject
// dso is null for the whole site, or a search without scope
if (dso == null) {
defaultTitle = configurationService.getProperty("dspace.name");
-feed.setDescription(localize(labels, MSG_FEED_DESCRIPTION));
+defaultDescriptionField = localize(labels, MSG_FEED_DESCRIPTION);
objectURL = resolveURL(request, null);
} else {
Bitstream logo = null;
if (dso instanceof IndexableCollection) {
Collection col = ((IndexableCollection) dso).getIndexedObject();
defaultTitle = col.getName();
-feed.setDescription(collectionService.getMetadataFirstValue(col,
-    CollectionService.MD_SHORT_DESCRIPTION, Item.ANY));
+defaultDescriptionField = collectionService.getMetadataFirstValue(col,
+    CollectionService.MD_SHORT_DESCRIPTION, Item.ANY);
logo = col.getLogo();
String cols = configurationService.getProperty("webui.feed.podcast.collections");
if (cols != null && cols.length() > 1 && cols.contains(col.getHandle())) {
@@ -214,8 +216,8 @@
} else if (dso instanceof IndexableCommunity) {
Community comm = ((IndexableCommunity) dso).getIndexedObject();
defaultTitle = comm.getName();
-feed.setDescription(communityService.getMetadataFirstValue(comm,
-    CommunityService.MD_SHORT_DESCRIPTION, Item.ANY));
+defaultDescriptionField = communityService.getMetadataFirstValue(comm,
+    CommunityService.MD_SHORT_DESCRIPTION, Item.ANY);
logo = comm.getLogo();
String comms = configurationService.getProperty("webui.feed.podcast.communities");
if (comms != null && comms.length() > 1 && comms.contains(comm.getHandle())) {
@@ -230,6 +232,12 @@
}
feed.setTitle(labels.containsKey(MSG_FEED_TITLE) ?
localize(labels, MSG_FEED_TITLE) : defaultTitle);

+if (defaultDescriptionField == null || defaultDescriptionField == "") {
+    defaultDescriptionField = I18nUtil.getMessage("org.dspace.app.util.SyndicationFeed.no-description");
+}
+
+feed.setDescription(defaultDescriptionField);
feed.setLink(objectURL);
feed.setPublishedDate(new Date());
feed.setUri(objectURL);
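For context, a compilable sketch of the fallback pattern this change introduces. The class and method here are illustrative stand-ins, with java.util.ResourceBundle standing in for DSpace's I18nUtil; the bundle name and message key mirror the diff:

import java.util.ResourceBundle;

public class FeedDescriptionFallbackSketch {
    /**
     * Prefer the object's short description; fall back to the i18n default
     * seen in the diff above. The key must be defined in the Messages bundle
     * for the lookup to succeed.
     */
    static String resolveDescription(String shortDescription) {
        if (shortDescription == null || shortDescription.isEmpty()) {
            return ResourceBundle.getBundle("Messages")
                    .getString("org.dspace.app.util.SyndicationFeed.no-description");
        }
        return shortDescription;
    }
}

Note that the committed check compares defaultDescriptionField == "", which in Java is a reference comparison matching only the interned empty literal; the sketch uses isEmpty() for a value comparison.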
@@ -52,11 +52,6 @@ public class IPAuthentication implements AuthenticationMethod {
*/
private static Logger log = org.apache.logging.log4j.LogManager.getLogger(IPAuthentication.class);

-/**
- * Whether to look for x-forwarded headers for logging IP addresses
- */
-protected static Boolean useProxies;

/**
* All the IP matchers
*/
@@ -250,7 +245,7 @@ public List<Group> getSpecialGroups(Context context, HttpServletRequest request)

log.debug(LogHelper.getHeader(context, "authenticated",
"special_groups=" + gsb.toString()
+ " (by IP=" + addr + ", useProxies=" + useProxies.toString() + ")"
+ " (by IP=" + addr + ")"
));
}

@@ -332,8 +332,8 @@ public void updateLastModified(Context context, Bitstream bitstream) {
}

@Override
-public List<Bitstream> findDeletedBitstreams(Context context) throws SQLException {
-    return bitstreamDAO.findDeletedBitstreams(context);
+public List<Bitstream> findDeletedBitstreams(Context context, int limit, int offset) throws SQLException {
+    return bitstreamDAO.findDeletedBitstreams(context, limit, offset);
}

@Override
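The new limit/offset parameters imply that callers now page through deleted bitstreams in batches. A minimal usage sketch, assuming only the service interface shown above; the batch size and the per-bitstream cleanup step are illustrative, not the actual DSpace cleanup implementation:

import java.sql.SQLException;
import java.util.List;

import org.dspace.content.Bitstream;
import org.dspace.content.service.BitstreamService;
import org.dspace.core.Context;

public class DeletedBitstreamSweepSketch {
    // Illustrative batch size; not taken from DSpace configuration.
    private static final int BATCH_SIZE = 100;

    // Walks all deleted bitstreams page by page using the new
    // findDeletedBitstreams(context, limit, offset) signature from the diff.
    static void sweep(Context context, BitstreamService bitstreamService) throws SQLException {
        int offset = 0;
        List<Bitstream> page;
        do {
            page = bitstreamService.findDeletedBitstreams(context, BATCH_SIZE, offset);
            for (Bitstream bitstream : page) {
                // hypothetical per-bitstream cleanup would go here
            }
            offset += page.size();
        } while (!page.isEmpty());
    }
}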
10 changes: 8 additions & 2 deletions dspace-api/src/main/java/org/dspace/content/DSpaceObject.java
@@ -48,6 +48,12 @@ public abstract class DSpaceObject implements Serializable, ReloadableEntity<java.util.UUID>
@Transient
private StringBuffer eventDetails = null;

+/**
+ * The same order should be applied inside this comparator
+ * {@link MetadataValueComparators#defaultComparator} to preserve
+ * ordering while the list has been modified and not yet persisted
+ * and reloaded.
+ */
@OneToMany(fetch = FetchType.LAZY, mappedBy = "dSpaceObject", cascade = CascadeType.ALL, orphanRemoval = true)
@OrderBy("metadataField, place")
private List<MetadataValue> metadata = new ArrayList<>();
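The new Javadoc points at MetadataValueComparators#defaultComparator. A minimal sketch of an in-memory comparator mirroring the @OrderBy("metadataField, place") clause above; the accessor names are assumptions based on common DSpace getters, not taken from MetadataValueComparators itself:

import java.util.Comparator;

import org.dspace.content.MetadataValue;

public class MetadataOrderingSketch {
    // Orders an unsaved in-memory metadata list the same way the database does:
    // by metadata field id first, then by place.
    static final Comparator<MetadataValue> BY_FIELD_THEN_PLACE =
        Comparator.comparing((MetadataValue v) -> v.getMetadataField().getID())
                  .thenComparingInt(MetadataValue::getPlace);
}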
@@ -116,7 +122,7 @@ protected void addDetails(String d) {
* @return summary of event details, or null if there are none.
*/
public String getDetails() {
-return (eventDetails == null ? null : eventDetails.toString());
+return eventDetails == null ? null : eventDetails.toString();
}

/**
@@ -145,7 +151,7 @@ public UUID getID() {
* one
*/
public String getHandle() {
-return (CollectionUtils.isNotEmpty(handles) ? handles.get(0).getHandle() : null);
+return CollectionUtils.isNotEmpty(handles) ? handles.get(0).getHandle() : null;
}

void setHandle(List<Handle> handle) {
