diff --git a/.github/workflows/cypress_ui.yml.future b/.github/workflows/cypress_ui.yml.future
index b38ae2f9558..0823233fdeb 100644
--- a/.github/workflows/cypress_ui.yml.future
+++ b/.github/workflows/cypress_ui.yml.future
@@ -2,6 +2,7 @@
#
# THIS IS AN OLD TRAVIS-CI.ORG JOB FILE
# To be used with Github Actions, it would be necessary to refactor it.
+# In addition, it needs to be rewritten to use our modern containers.
# Keeping it as the future example it has been before.
# See also #5846
#
@@ -30,8 +31,6 @@ jobs:
directories:
# we also need to cache folder with Cypress binary
- ~/.cache
- # we want to cache the Glassfish and Solr dependencies as well
- - conf/docker-aio/dv/deps
before_install:
- cd tests
install:
diff --git a/.github/workflows/shellcheck.yml b/.github/workflows/shellcheck.yml
index 94ba041e135..dc0d19914a1 100644
--- a/.github/workflows/shellcheck.yml
+++ b/.github/workflows/shellcheck.yml
@@ -33,7 +33,6 @@ jobs:
# Exclude old scripts
exclude: |
*/.git/*
- conf/docker-aio/*
doc/*
downloads/*
scripts/database/*
diff --git a/.gitignore b/.gitignore
index d38538fc364..5a2da73fb2c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -39,17 +39,6 @@ scripts/api/setup-all.*.log
# ctags generated tag file
tags
-# dependencies I'm not sure we're allowed to redistribute / have in version control
-conf/docker-aio/dv/deps/
-
-# no need to check aio installer zip into vc
-conf/docker-aio/dv/install/dvinstall.zip
-# or copy of test data
-conf/docker-aio/testdata/
-
-# docker-aio creates maven/ which reports 86 new files. ignore this wd.
-maven/
-
scripts/installer/default.config
*.pem
@@ -71,8 +60,5 @@ scripts/search/data/binary/trees.png.thumb140
src/main/webapp/resources/images/cc0.png.thumb140
src/main/webapp/resources/images/dataverseproject.png.thumb140
-# apache-maven is downloaded by docker-aio
-apache-maven*
-
# Docker development volumes
/docker-dev-volumes
diff --git a/conf/docker-aio/0prep_deps.sh b/conf/docker-aio/0prep_deps.sh
deleted file mode 100755
index 13a91705303..00000000000
--- a/conf/docker-aio/0prep_deps.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/sh
-if [ ! -d dv/deps ]; then
- mkdir -p dv/deps
-fi
-wdir=`pwd`
-
-if [ ! -e dv/deps/payara-5.2022.3.zip ]; then
- echo "payara dependency prep"
- wget https://s3-eu-west-1.amazonaws.com/payara.fish/Payara+Downloads/5.2022.3/payara-5.2022.3.zip -O dv/deps/payara-5.2022.3.zip
-fi
-
-if [ ! -e dv/deps/solr-8.11.1dv.tgz ]; then
- echo "solr dependency prep"
- # schema changes *should* be the only ones...
- cd dv/deps/
- wget https://archive.apache.org/dist/lucene/solr/8.11.1/solr-8.11.1.tgz -O solr-8.11.1dv.tgz
- cd ../../
-fi
-
diff --git a/conf/docker-aio/1prep.sh b/conf/docker-aio/1prep.sh
deleted file mode 100755
index 508d41d93ff..00000000000
--- a/conf/docker-aio/1prep.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-# move things necessary for integration tests into build context.
-# this was based off the phoenix deployment; and is likely uglier and bulkier than necessary in a perfect world
-
-mkdir -p testdata/doc/sphinx-guides/source/_static/util/
-cp ../solr/8.11.1/schema*.xml testdata/
-cp ../solr/8.11.1/solrconfig.xml testdata/
-cp ../jhove/jhove.conf testdata/
-cp ../jhove/jhoveConfig.xsd testdata/
-cd ../../
-cp -r scripts conf/docker-aio/testdata/
-cp doc/sphinx-guides/source/_static/util/createsequence.sql conf/docker-aio/testdata/doc/sphinx-guides/source/_static/util/
-
-wget -q https://downloads.apache.org/maven/maven-3/3.8.5/binaries/apache-maven-3.8.5-bin.tar.gz
-tar xfz apache-maven-3.8.5-bin.tar.gz
-mkdir maven
-mv apache-maven-3.8.5/* maven/
-echo "export JAVA_HOME=/usr/lib/jvm/jre-openjdk" > maven/maven.sh
-echo "export M2_HOME=../maven" >> maven/maven.sh
-echo "export MAVEN_HOME=../maven" >> maven/maven.sh
-echo "export PATH=../maven/bin:${PATH}" >> maven/maven.sh
-chmod 0755 maven/maven.sh
-
-# not using dvinstall.zip for setupIT.bash; but still used in install.bash for normal ops
-source maven/maven.sh && mvn clean
-./scripts/installer/custom-build-number
-source maven/maven.sh && mvn package
-cd scripts/installer
-make clean
-make
-mkdir -p ../../conf/docker-aio/dv/install
-cp dvinstall.zip ../../conf/docker-aio/dv/install/
-
-# ITs sometimes need files server-side
-# yes, these copies could be avoided by moving the build root here. but the build
-# context is already big enough that it seems worth avoiding.
-cd ../../
-cp src/test/java/edu/harvard/iq/dataverse/makedatacount/sushi_sample_logs.json conf/docker-aio/testdata/
diff --git a/conf/docker-aio/c8.dockerfile b/conf/docker-aio/c8.dockerfile
deleted file mode 100644
index 0002464cbf2..00000000000
--- a/conf/docker-aio/c8.dockerfile
+++ /dev/null
@@ -1,87 +0,0 @@
-FROM rockylinux/rockylinux:latest
-# OS dependencies
-# IQSS now recommends Postgres 13.
-RUN dnf -qy module disable postgresql
-RUN yum install -y https://download.postgresql.org/pub/repos/yum/reporpms/EL-8-x86_64/pgdg-redhat-repo-latest.noarch.rpm
-
-RUN echo "fastestmirror=true" >> /etc/dnf/dnf.conf
-RUN yum install -y java-11-openjdk-devel postgresql13-server sudo epel-release unzip curl httpd python2 diffutils
-RUN yum install -y jq lsof awscli
-
-# for older search scripts
-RUN ln -s /usr/bin/python2 /usr/bin/python
-
-# copy and unpack dependencies (solr, payara)
-COPY dv /tmp/dv
-COPY testdata/schema*.xml /tmp/dv/
-COPY testdata/solrconfig.xml /tmp/dv
-
-# ITs need files
-COPY testdata/sushi_sample_logs.json /tmp/
-
-# IPv6 and localhost appear to be related to some of the intermittent connection issues
-COPY disableipv6.conf /etc/sysctl.d/
-RUN rm /etc/httpd/conf/*
-COPY httpd.conf /etc/httpd/conf
-RUN cd /opt ; tar zxf /tmp/dv/deps/solr-8.11.1dv.tgz
-RUN cd /opt ; unzip /tmp/dv/deps/payara-5.2022.3.zip ; ln -s /opt/payara5 /opt/glassfish4
-
-# this copy of domain.xml is the result of running `asadmin set server.monitoring-service.module-monitoring-levels.jvm=LOW` on a default glassfish installation (aka - enable the glassfish REST monitor endpoint for the jvm)
-# this dies under Java 11, do we keep it?
-#COPY domain-restmonitor.xml /opt/payara5/glassfish/domains/domain1/config/domain.xml
-
-RUN sudo -u postgres /usr/pgsql-13/bin/initdb -D /var/lib/pgsql/13/data -E 'UTF-8'
-
-# copy configuration related files
-RUN cp /tmp/dv/pg_hba.conf /var/lib/pgsql/13/data/
-RUN cp -r /opt/solr-8.11.1/server/solr/configsets/_default /opt/solr-8.11.1/server/solr/collection1
-RUN cp /tmp/dv/schema*.xml /opt/solr-8.11.1/server/solr/collection1/conf/
-RUN cp /tmp/dv/solrconfig.xml /opt/solr-8.11.1/server/solr/collection1/conf/solrconfig.xml
-
-# skipping payara user and solr user (run both as root)
-
-#solr port
-EXPOSE 8983
-
-# postgres port
-EXPOSE 5432
-
-# payara port
-EXPOSE 8080
-
-# apache port, http
-EXPOSE 80
-
-# debugger ports (jmx,jdb)
-EXPOSE 8686
-EXPOSE 9009
-
-RUN mkdir /opt/dv
-
-# keeping the symlink on the off chance that something else is still assuming /usr/local/glassfish4
-RUN ln -s /opt/payara5 /usr/local/glassfish4
-COPY dv/install/ /opt/dv/
-COPY install.bash /opt/dv/
-COPY entrypoint.bash /opt/dv/
-COPY testdata /opt/dv/testdata
-COPY testscripts/* /opt/dv/testdata/
-COPY setupIT.bash /opt/dv
-WORKDIR /opt/dv
-
-# need to take DOI provider info from build args as of ec377d2a4e27424db8815c55ce544deee48fc5e0
-# Default to EZID; use built-args to switch to DataCite (or potentially handles)
-#ARG DoiProvider=EZID
-ARG DoiProvider=FAKE
-ARG doi_baseurl=https://ezid.cdlib.org
-ARG doi_username=apitest
-ARG doi_password=apitest
-ENV DoiProvider=${DoiProvider}
-ENV doi_baseurl=${doi_baseurl}
-ENV doi_username=${doi_username}
-ENV doi_password=${doi_password}
-COPY configure_doi.bash /opt/dv
-
-# healthcheck for payara only (assumes modified domain.xml);
-# does not check dataverse application status.
-HEALTHCHECK CMD curl --fail http://localhost:4848/monitoring/domain/server.json || exit 1
-CMD ["/opt/dv/entrypoint.bash"]
diff --git a/conf/docker-aio/configure_doi.bash b/conf/docker-aio/configure_doi.bash
deleted file mode 100755
index f0f0bc6d0d4..00000000000
--- a/conf/docker-aio/configure_doi.bash
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-
-cd /opt/payara5
-
-# if appropriate; reconfigure PID provider on the basis of environmental variables.
-if [ ! -z "${DoiProvider}" ]; then
- curl -X PUT -d ${DoiProvider} http://localhost:8080/api/admin/settings/:DoiProvider
-fi
-if [ ! -z "${doi_username}" ]; then
- bin/asadmin create-jvm-options "-Ddoi.username=${doi_username}"
-fi
-if [ ! -z "${doi_password}" ]; then
- bin/asadmin create-jvm-options "-Ddoi.password=${doi_password}"
-fi
-if [ ! -z "${doi_baseurl}" ]; then
- bin/asadmin delete-jvm-options "-Ddoi.baseurlstring=https\://mds.test.datacite.org"
- doi_baseurl_esc=`echo ${doi_baseurl} | sed -e 's/:/\\\:/'`
- bin/asadmin create-jvm-options "-Ddoi.baseurlstring=${doi_baseurl_esc}"
-fi
-if [ ! -z "${doi_dataciterestapiurl}" ]; then
- bin/asadmin delete-jvm-options "-Ddoi.dataciterestapiurlstring=https\://api.test.datacite.org"
- doi_dataciterestapiurl_esc=`echo ${doi_dataciterestapiurl} | sed -e 's/:/\\\:/'`
- bin/asadmin create-jvm-options "-Ddoi.dataciterestapiurlstring=${doi_dataciterestapiurl_esc}"
-fi
diff --git a/conf/docker-aio/disableipv6.conf b/conf/docker-aio/disableipv6.conf
deleted file mode 100644
index 8d425183e3f..00000000000
--- a/conf/docker-aio/disableipv6.conf
+++ /dev/null
@@ -1 +0,0 @@
-net.ipv6.conf.all.disable_ipv6 = 1
diff --git a/conf/docker-aio/domain-restmonitor.xml b/conf/docker-aio/domain-restmonitor.xml
deleted file mode 100644
index a18a88ab011..00000000000
--- a/conf/docker-aio/domain-restmonitor.xml
+++ /dev/null
@@ -1,486 +0,0 @@
- -XX:MaxPermSize=192m
- -client
- -Djava.awt.headless=true
- -Djdk.corba.allowOutputStreamSubclass=true
- -Djavax.xml.accessExternalSchema=all
- -Djavax.management.builder.initial=com.sun.enterprise.v3.admin.AppServerMBeanServerBuilder
- -XX:+UnlockDiagnosticVMOptions
- -Djava.endorsed.dirs=${com.sun.aas.installRoot}/modules/endorsed${path.separator}${com.sun.aas.installRoot}/lib/endorsed
- -Djava.security.policy=${com.sun.aas.instanceRoot}/config/server.policy
- -Djava.security.auth.login.config=${com.sun.aas.instanceRoot}/config/login.conf
- -Dcom.sun.enterprise.security.httpsOutboundKeyAlias=s1as
- -Xmx512m
- -Djavax.net.ssl.keyStore=${com.sun.aas.instanceRoot}/config/keystore.jks
- -Djavax.net.ssl.trustStore=${com.sun.aas.instanceRoot}/config/cacerts.jks
- -Djava.ext.dirs=${com.sun.aas.javaRoot}/lib/ext${path.separator}${com.sun.aas.javaRoot}/jre/lib/ext${path.separator}${com.sun.aas.instanceRoot}/lib/ext
- -Djdbc.drivers=org.apache.derby.jdbc.ClientDriver
- -DANTLR_USE_DIRECT_CLASS_LOADING=true
- -Dcom.sun.enterprise.config.config_environment_factory_class=com.sun.enterprise.config.serverbeans.AppserverConfigEnvironmentFactory
-
- -Dorg.glassfish.additionalOSGiBundlesToStart=org.apache.felix.shell,org.apache.felix.gogo.runtime,org.apache.felix.gogo.shell,org.apache.felix.gogo.command,org.apache.felix.shell.remote,org.apache.felix.fileinstall
-
-
- -Dosgi.shell.telnet.port=6666
-
- -Dosgi.shell.telnet.maxconn=1
-
- -Dosgi.shell.telnet.ip=127.0.0.1
-
- -Dgosh.args=--nointeractive
-
- -Dfelix.fileinstall.dir=${com.sun.aas.installRoot}/modules/autostart/
-
- -Dfelix.fileinstall.poll=5000
-
- -Dfelix.fileinstall.log.level=2
-
- -Dfelix.fileinstall.bundles.new.start=true
-
- -Dfelix.fileinstall.bundles.startTransient=true
-
- -Dfelix.fileinstall.disableConfigSave=false
-
- -XX:NewRatio=2
-
- -Dcom.ctc.wstx.returnNullForDefaultNamespace=true
-
- -XX:MaxPermSize=192m
- -server
- -Djava.awt.headless=true
- -Djdk.corba.allowOutputStreamSubclass=true
- -XX:+UnlockDiagnosticVMOptions
- -Djava.endorsed.dirs=${com.sun.aas.installRoot}/modules/endorsed${path.separator}${com.sun.aas.installRoot}/lib/endorsed
- -Djava.security.policy=${com.sun.aas.instanceRoot}/config/server.policy
- -Djava.security.auth.login.config=${com.sun.aas.instanceRoot}/config/login.conf
- -Dcom.sun.enterprise.security.httpsOutboundKeyAlias=s1as
- -Djavax.net.ssl.keyStore=${com.sun.aas.instanceRoot}/config/keystore.jks
- -Djavax.net.ssl.trustStore=${com.sun.aas.instanceRoot}/config/cacerts.jks
- -Djava.ext.dirs=${com.sun.aas.javaRoot}/lib/ext${path.separator}${com.sun.aas.javaRoot}/jre/lib/ext${path.separator}${com.sun.aas.instanceRoot}/lib/ext
- -Djdbc.drivers=org.apache.derby.jdbc.ClientDriver
- -DANTLR_USE_DIRECT_CLASS_LOADING=true
- -Dcom.sun.enterprise.config.config_environment_factory_class=com.sun.enterprise.config.serverbeans.AppserverConfigEnvironmentFactory
- -XX:NewRatio=2
- -Xmx512m
-
- -Dorg.glassfish.additionalOSGiBundlesToStart=org.apache.felix.shell,org.apache.felix.gogo.runtime,org.apache.felix.gogo.shell,org.apache.felix.gogo.command,org.apache.felix.fileinstall
-
- -Dosgi.shell.telnet.port=${OSGI_SHELL_TELNET_PORT}
-
- -Dosgi.shell.telnet.maxconn=1
-
- -Dosgi.shell.telnet.ip=127.0.0.1
-
- -Dgosh.args=--noshutdown -c noop=true
-
- -Dfelix.fileinstall.dir=${com.sun.aas.installRoot}/modules/autostart/
-
- -Dfelix.fileinstall.poll=5000
-
- -Dfelix.fileinstall.log.level=3
-
- -Dfelix.fileinstall.bundles.new.start=true
-
- -Dfelix.fileinstall.bundles.startTransient=true
-
- -Dfelix.fileinstall.disableConfigSave=false
-
diff --git a/conf/docker-aio/dv/install/default.config b/conf/docker-aio/dv/install/default.config
deleted file mode 100644
index 0b806a8714b..00000000000
--- a/conf/docker-aio/dv/install/default.config
+++ /dev/null
@@ -1,15 +0,0 @@
-HOST_DNS_ADDRESS localhost
-GLASSFISH_DIRECTORY /opt/glassfish4
-ADMIN_EMAIL
-MAIL_SERVER mail.hmdc.harvard.edu
-POSTGRES_ADMIN_PASSWORD secret
-POSTGRES_SERVER db
-POSTGRES_PORT 5432
-POSTGRES_DATABASE dvndb
-POSTGRES_USER dvnapp
-POSTGRES_PASSWORD secret
-SOLR_LOCATION idx
-RSERVE_HOST localhost
-RSERVE_PORT 6311
-RSERVE_USER rserve
-RSERVE_PASSWORD rserve
diff --git a/conf/docker-aio/dv/pg_hba.conf b/conf/docker-aio/dv/pg_hba.conf
deleted file mode 100644
index 77feba5247d..00000000000
--- a/conf/docker-aio/dv/pg_hba.conf
+++ /dev/null
@@ -1,91 +0,0 @@
-# PostgreSQL Client Authentication Configuration File
-# ===================================================
-#
-# Refer to the "Client Authentication" section in the PostgreSQL
-# documentation for a complete description of this file. A short
-# synopsis follows.
-#
-# This file controls: which hosts are allowed to connect, how clients
-# are authenticated, which PostgreSQL user names they can use, which
-# databases they can access. Records take one of these forms:
-#
-# local DATABASE USER METHOD [OPTIONS]
-# host DATABASE USER ADDRESS METHOD [OPTIONS]
-# hostssl DATABASE USER ADDRESS METHOD [OPTIONS]
-# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS]
-#
-# (The uppercase items must be replaced by actual values.)
-#
-# The first field is the connection type: "local" is a Unix-domain
-# socket, "host" is either a plain or SSL-encrypted TCP/IP socket,
-# "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a
-# plain TCP/IP socket.
-#
-# DATABASE can be "all", "sameuser", "samerole", "replication", a
-# database name, or a comma-separated list thereof. The "all"
-# keyword does not match "replication". Access to replication
-# must be enabled in a separate record (see example below).
-#
-# USER can be "all", a user name, a group name prefixed with "+", or a
-# comma-separated list thereof. In both the DATABASE and USER fields
-# you can also write a file name prefixed with "@" to include names
-# from a separate file.
-#
-# ADDRESS specifies the set of hosts the record matches. It can be a
-# host name, or it is made up of an IP address and a CIDR mask that is
-# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that
-# specifies the number of significant bits in the mask. A host name
-# that starts with a dot (.) matches a suffix of the actual host name.
-# Alternatively, you can write an IP address and netmask in separate
-# columns to specify the set of hosts. Instead of a CIDR-address, you
-# can write "samehost" to match any of the server's own IP addresses,
-# or "samenet" to match any address in any subnet that the server is
-# directly connected to.
-#
-# METHOD can be "trust", "reject", "md5", "password", "gss", "sspi",
-# "krb5", "ident", "peer", "pam", "ldap", "radius" or "cert". Note that
-# "password" sends passwords in clear text; "md5" is preferred since
-# it sends encrypted passwords.
-#
-# OPTIONS are a set of options for the authentication in the format
-# NAME=VALUE. The available options depend on the different
-# authentication methods -- refer to the "Client Authentication"
-# section in the documentation for a list of which options are
-# available for which authentication methods.
-#
-# Database and user names containing spaces, commas, quotes and other
-# special characters must be quoted. Quoting one of the keywords
-# "all", "sameuser", "samerole" or "replication" makes the name lose
-# its special character, and just match a database or username with
-# that name.
-#
-# This file is read on server startup and when the postmaster receives
-# a SIGHUP signal. If you edit the file on a running system, you have
-# to SIGHUP the postmaster for the changes to take effect. You can
-# use "pg_ctl reload" to do that.
-
-# Put your actual configuration here
-# ----------------------------------
-#
-# If you want to allow non-local connections, you need to add more
-# "host" records. In that case you will also need to make PostgreSQL
-# listen on a non-local interface via the listen_addresses
-# configuration parameter, or via the -i or -h command line switches.
-
-
-
-# TYPE DATABASE USER ADDRESS METHOD
-
-# "local" is for Unix domain socket connections only
-#local all all peer
-local all all trust
-# IPv4 local connections:
-#host all all 127.0.0.1/32 trust
-host all all 0.0.0.0/0 trust
-# IPv6 local connections:
-host all all ::1/128 trust
-# Allow replication connections from localhost, by a user with the
-# replication privilege.
-#local replication postgres peer
-#host replication postgres 127.0.0.1/32 ident
-#host replication postgres ::1/128 ident
diff --git a/conf/docker-aio/entrypoint.bash b/conf/docker-aio/entrypoint.bash
deleted file mode 100755
index 236bb30f67a..00000000000
--- a/conf/docker-aio/entrypoint.bash
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env bash
-export LANG=en_US.UTF-8
-sudo -u postgres /usr/pgsql-13/bin/pg_ctl start -D /var/lib/pgsql/13/data &
-cd /opt/solr-8.11.1/
-# TODO: Run Solr as non-root and remove "-force".
-bin/solr start -force
-bin/solr create_core -c collection1 -d server/solr/collection1/conf -force
-
-# start apache, in both foreground and background...
-apachectl -DFOREGROUND &
-
-# TODO: Run Payara as non-root.
-cd /opt/payara5
-bin/asadmin start-domain --debug
-sleep infinity
-
diff --git a/conf/docker-aio/httpd.conf b/conf/docker-aio/httpd.conf
deleted file mode 100644
index 85c851d785f..00000000000
--- a/conf/docker-aio/httpd.conf
+++ /dev/null
@@ -1,27 +0,0 @@
-
-Include conf.d/*.conf
-Include conf.modules.d/*.conf
-ServerName localhost
-Listen 80 443
-PidFile run/httpd.pid
-DocumentRoot "/var/www/html"
-TypesConfig /etc/mime.types
-User apache
-Group apache
-
-<VirtualHost *:80>
- ServerName localhost
- LogLevel debug
- ErrorLog logs/error_log
- LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
- CustomLog logs/access_log combined
-
- # proxy config (aka - what to send to glassfish or not)
- ProxyPassMatch ^/Shibboleth.sso !
- ProxyPassMatch ^/shibboleth-ds !
- # pass everything else to Glassfish
- ProxyPass / ajp://localhost:8009/
-# glassfish can be slow sometimes
- ProxyTimeout 300
-</VirtualHost>
-
diff --git a/conf/docker-aio/install.bash b/conf/docker-aio/install.bash
deleted file mode 100755
index 2b3275ad830..00000000000
--- a/conf/docker-aio/install.bash
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env bash
-sudo -u postgres createuser --superuser dvnapp
-#./entrypoint.bash &
-unzip dvinstall.zip
-cd dvinstall/
-echo "beginning installer"
-./install -admin_email=dvAdmin@mailinator.com -y -f > install.out 2> install.err
-
-echo "installer complete"
-cat install.err
diff --git a/conf/docker-aio/prep_it.bash b/conf/docker-aio/prep_it.bash
deleted file mode 100755
index adb257e43b1..00000000000
--- a/conf/docker-aio/prep_it.bash
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env bash
-
-# run through all the steps to setup docker-aio to run integration tests
-
-# hard-codes several assumptions: image is named dv0, container is named dv, port is 8084
-
-# glassfish healthy/ready retries
-n_wait=5
-
-cd conf/docker-aio
-./0prep_deps.sh
-./1prep.sh
-docker build -t dv0 -f c8.dockerfile .
-# cleanup from previous runs if necessary
-docker rm -f dv
-# start container
-docker run -d -p 8084:80 -p 8083:8080 -p 9010:9009 --name dv dv0
-# wait for glassfish to be healthy
-i_wait=0
-d_wait=10
-while [ $i_wait -lt $n_wait ]
-do
- h=`docker inspect -f "{{.State.Health.Status}}" dv`
- if [ "healthy" == "${h}" ]; then
- break
- else
- sleep $d_wait
- fi
- i_wait=$(( $i_wait + 1 ))
-
-done
-# try setupIT.bash
-docker exec dv /opt/dv/setupIT.bash
-err=$?
-if [ $err -ne 0 ]; then
- echo "error - setupIT failure"
- exit 1
-fi
-# configure DOI provider based on docker build arguments / environmental variables
-docker exec dv /opt/dv/configure_doi.bash
-err=$?
-if [ $err -ne 0 ]; then
- echo "error - DOI configuration failure"
- exit 1
-fi
-# handle config for the private url test (and things like publishing...)
-./seturl.bash
-
-
-cd ../..
-#echo "docker-aio ready to run integration tests ($i_retry)"
-echo "docker-aio ready to run integration tests"
-curl http://localhost:8084/api/info/version
-echo $?
-
diff --git a/conf/docker-aio/readme.md b/conf/docker-aio/readme.md
deleted file mode 100644
index f3031a5bb6e..00000000000
--- a/conf/docker-aio/readme.md
+++ /dev/null
@@ -1,64 +0,0 @@
-# Docker All-In-One
-
-> :information_source: **NOTE: Sunsetting of this module is imminent.** There is no schedule yet, but expect it to go away.
-> Please let the [Dataverse Containerization Working Group](https://ct.gdcc.io) know if you are a user and
-> what should be preserved.
-
-First pass docker all-in-one image, intended for running integration tests against.
-Also usable for normal development and system evaluation; not intended for production.
-
-### Requirements:
- - java11 compiler, maven, make, wget, docker
-
-### Quickstart:
- - in the root of the repository, run `./conf/docker-aio/prep_it.bash`
- - if using DataCite test credentials, update the build args appropriately.
- - if all goes well, you should see the results of the `api/info/version` endpoint, including the deployed build (eg `{"status":"OK","data":{"version":"4.8.6","build":"develop-c3e9f40"}}`). If not, you may need to read the non-quickstart instructions.
- - run integration tests: `./conf/docker-aio/run-test-suite.sh`
-
-----
-
-## More in-depth documentation:
-
-
-### Initial setup (aka - do once):
-- `cd conf/docker-aio` and run `./0prep_deps.sh` to create Payara and Solr tarballs in `conf/docker-aio/dv/deps`.
-
-### Per-build:
-
-> Note: If you encounter any issues, see the Troubleshooting section at the end of this document.
-
-#### Setup
-
-- `cd conf/docker-aio`, and run `./1prep.sh` to copy files for integration test data into docker build context; `1prep.sh` will also build the war file and installation zip file
-- build the docker image: `docker build -t dv0 -f c8.dockerfile .`
-
-- Run image: `docker run -d -p 8083:8080 -p 8084:80 --name dv dv0` (aka - forward port 8083 locally to 8080 in the container for payara, and 8084 to 80 for apache); if you'd like to connect a java debugger to payara, use `docker run -d -p 8083:8080 -p 8084:80 -p 9010:9009 --name dv dv0`
-
-- Installation (integration test): `docker exec dv /opt/dv/setupIT.bash`
- (Note that it's possible to customize the installation by editing `conf/docker-aio/default.config` and running `docker exec dv /opt/dv/install.bash` but for the purposes of integration testing, the `setupIT.bash` script above works fine.)
-
-- update `dataverse.siteUrl` (appears only necessary for `DatasetsIT.testPrivateUrl`): `docker exec dv /usr/local/glassfish4/bin/asadmin create-jvm-options "-Ddataverse.siteUrl=http\://localhost\:8084"` (or use the provided `seturl.bash`)
-
-#### Run integration tests:
-
-First, cd back to the root of the repo where the `pom.xml` file is (`cd ../..` assuming you're still in the `conf/docker-aio` directory). Then run the test suite with script below:
-
-`conf/docker-aio/run-test-suite.sh`
-
-There isn't any strict requirement on the local ports (8083, 8084 in this doc), the name of the image (dv0), or the container (dv); these can be changed as desired, as long as they are consistent.
-
-### Troubleshooting Notes:
-
-* If the Dataverse build fails due to an error about `Module` being ambiguous, you might be using a Java 9 compiler.
-
-* If you see an error like this:
- ```
- docker: Error response from daemon: Conflict. The container name "/dv" is already in use by container "5f72a45b68c86c7b0f4305b83ce7d663020329ea4e30fa2a3ce9ddb05223533d"
- You have to remove (or rename) that container to be able to reuse that name.
- ```
- run something like `docker ps -a | grep dv` to see the container left over from the last run and something like `docker rm 5f72a45b68c8` to remove it. Then try the `docker run` command above again.
-
-* `empty reply from server` or `Failed to connect to ::1: Cannot assign requested address` tend to indicate either that you haven't given payara enough time to start, or your docker setup is in an inconsistent state and should probably be restarted.
-
-* For manually fiddling around with the created dataverse, use user `dataverseAdmin` with password `admin1`.
diff --git a/conf/docker-aio/run-test-suite.sh b/conf/docker-aio/run-test-suite.sh
deleted file mode 100755
index 39809a7a50e..00000000000
--- a/conf/docker-aio/run-test-suite.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-# This is the canonical list of which "IT" tests are expected to pass.
-
-dvurl=$1
-if [ -z "$dvurl" ]; then
- dvurl="http://localhost:8084"
-fi
-
-integrationtests=$(
diff --git a/conf/docker-aio/testscripts/db.sh b/conf/docker-aio/testscripts/db.sh
deleted file mode 100755
index f0a9e409fd7..00000000000
--- a/conf/docker-aio/testscripts/db.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-psql -U postgres -c "CREATE ROLE dvnapp PASSWORD 'secret' SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN" template1
-psql -U dvnapp -c 'CREATE DATABASE "dvndb" WITH OWNER = "dvnapp"' template1
diff --git a/conf/docker-aio/testscripts/install b/conf/docker-aio/testscripts/install
deleted file mode 100755
index f87f180b554..00000000000
--- a/conf/docker-aio/testscripts/install
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/sh
-export HOST_ADDRESS=localhost
-export GLASSFISH_ROOT=/opt/payara5
-export FILES_DIR=/opt/payara5/glassfish/domains/domain1/files
-export DB_NAME=dvndb
-export DB_PORT=5432
-export DB_HOST=localhost
-export DB_USER=dvnapp
-export DB_PASS=secret
-export RSERVE_HOST=localhost
-export RSERVE_PORT=6311
-export RSERVE_USER=rserve
-export RSERVE_PASS=rserve
-export SMTP_SERVER=localhost
-export MEM_HEAP_SIZE=2048
-export GLASSFISH_DOMAIN=domain1
-cd scripts/installer
-#cp ../../conf/jhove/jhove.conf $GLASSFISH_ROOT/glassfish/domains/$GLASSFISH_DOMAIN/config/jhove.conf
-cp /opt/dv/testdata/jhove.conf $GLASSFISH_ROOT/glassfish/domains/$GLASSFISH_DOMAIN/config/jhove.conf
-cp /opt/dv/testdata/jhoveConfig.xsd $GLASSFISH_ROOT/glassfish/domains/$GLASSFISH_DOMAIN/config/jhoveConfig.xsd
-./as-setup.sh dvndb
diff --git a/conf/docker-aio/testscripts/post b/conf/docker-aio/testscripts/post
deleted file mode 100755
index 0f292109d31..00000000000
--- a/conf/docker-aio/testscripts/post
+++ /dev/null
@@ -1,13 +0,0 @@
-#/bin/sh
-cd scripts/api
-./setup-all.sh --insecure -p=admin1 | tee /tmp/setup-all.sh.out
-cd ../..
-psql -U dvnapp dvndb -f doc/sphinx-guides/source/_static/util/createsequence.sql
-scripts/search/tests/publish-dataverse-root
-#git checkout scripts/api/data/dv-root.json
-scripts/search/tests/grant-authusers-add-on-root
-scripts/search/populate-users
-scripts/search/create-users
-scripts/search/tests/create-all-and-test
-scripts/search/tests/publish-spruce1-and-test
-#java -jar downloads/schemaSpy_5.0.0.jar -t pgsql -host localhost -db dvndb -u postgres -p secret -s public -dp scripts/installer/pgdriver/postgresql-9.1-902.jdbc4.jar -o /var/www/html/schemaspy/latest
diff --git a/conf/docker-dcm/.gitignore b/conf/docker-dcm/.gitignore
deleted file mode 100644
index ac39981ce6a..00000000000
--- a/conf/docker-dcm/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-*.rpm
-upload*.bash
diff --git a/conf/docker-dcm/0prep.sh b/conf/docker-dcm/0prep.sh
deleted file mode 100755
index 300aa39d567..00000000000
--- a/conf/docker-dcm/0prep.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/sh
-DCM_VERSION=0.5
-RSAL_VERSION=0.1
-
-if [ ! -e dcm-${DCM_VERSION}-0.noarch.rpm ]; then
- wget https://github.com/sbgrid/data-capture-module/releases/download/${DCM_VERSION}/dcm-${DCM_VERSION}-0.noarch.rpm
-fi
-
-if [ ! -e rsal-${RSAL_VERSION}-0.noarch.rpm ] ;then
- wget https://github.com/sbgrid/rsal/releases/download/${RSAL_VERSION}/rsal-${RSAL_VERSION}-0.noarch.rpm
-fi
diff --git a/conf/docker-dcm/c6client.dockerfile b/conf/docker-dcm/c6client.dockerfile
deleted file mode 100644
index e4d1ae7da82..00000000000
--- a/conf/docker-dcm/c6client.dockerfile
+++ /dev/null
@@ -1,7 +0,0 @@
-# build from repo root
-FROM centos:6
-RUN yum install -y epel-release
-RUN yum install -y rsync openssh-clients jq curl wget lynx
-RUN useradd depositor
-USER depositor
-WORKDIR /home/depositor
diff --git a/conf/docker-dcm/cfg/dcm/bashrc b/conf/docker-dcm/cfg/dcm/bashrc
deleted file mode 100644
index 07137ab8471..00000000000
--- a/conf/docker-dcm/cfg/dcm/bashrc
+++ /dev/null
@@ -1,18 +0,0 @@
-# .bashrc
-
-# User specific aliases and functions
-
-alias rm='rm -i'
-alias cp='cp -i'
-alias mv='mv -i'
-
-# Source global definitions
-if [ -f /etc/bashrc ]; then
- . /etc/bashrc
-fi
-
-# these are dummy values, obviously
-export UPLOADHOST=dcmsrv
-export DVAPIKEY=burrito
-export DVHOSTINT=dvsrv
-export DVHOST=dvsrv
diff --git a/conf/docker-dcm/cfg/dcm/entrypoint-dcm.sh b/conf/docker-dcm/cfg/dcm/entrypoint-dcm.sh
deleted file mode 100755
index 0db674bfac4..00000000000
--- a/conf/docker-dcm/cfg/dcm/entrypoint-dcm.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-
-/etc/init.d/sshd start
-/etc/init.d/redis start
-/etc/init.d/rq start
-lighttpd -D -f /etc/lighttpd/lighttpd.conf
diff --git a/conf/docker-dcm/cfg/dcm/healthcheck-dcm.sh b/conf/docker-dcm/cfg/dcm/healthcheck-dcm.sh
deleted file mode 100755
index 3964a79391e..00000000000
--- a/conf/docker-dcm/cfg/dcm/healthcheck-dcm.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh
-
-r_rq=`/etc/init.d/rq status`
-if [ "rq_worker running" != "$r_rq" ]; then
- echo "rq failed"
- exit 1
-fi
-r_www=`/etc/init.d/lighttpd status`
-e_www=$?
-if [ 0 -ne $e_www ]; then
- echo "lighttpd failed"
- exit 2
-fi
-
diff --git a/conf/docker-dcm/cfg/dcm/rq-init-d b/conf/docker-dcm/cfg/dcm/rq-init-d
deleted file mode 100755
index 093cd894376..00000000000
--- a/conf/docker-dcm/cfg/dcm/rq-init-d
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/bin/bash
-
-# chkconfig: 2345 90 60
-# description: rq worker script (single worker process)
-
-# example rq configuration file (to be placed in /etc/init.d)
-
-# works on cent6
-
-DAEMON=rq_worker
-DAEMON_PATH=/opt/dcm/gen/
-export UPLOADHOST=dcmsrv
-VIRTUALENV=
-LOGFILE=/var/log/${DAEMON}.log
-PIDFILE=/var/run/${DAEMON}.pid
-
-case "$1" in
-start)
- printf "%-50s" "starting $DAEMON..."
- cd $DAEMON_PATH
- if [ ! -z "$VIRTUALENV" ]; then
- source $VIRTUALENV/bin/activate
- fi
- rq worker normal --pid $PIDFILE > ${LOGFILE} 2>&1 &
-;;
-status)
- if [ -f $PIDFILE ]; then
- PID=`cat $PIDFILE`
- if [ -z "`ps axf | grep ${PID} | grep -v grep`" ]; then
- printf "%s\n" "$DAEMON not running, but PID file ($PIDFILE) exists"
- else
- echo "$DAEMON running"
- fi
- else
- printf "%s\n" "$DAEMON not running"
- fi
-;;
-stop)
- printf "%-50s" "stopping $DAEMON"
- if [ -f $PIDFILE ]; then
- PID=`cat $PIDFILE`
- kill -HUP $PID
- rm -f $PIDFILE
- else
- printf "%s\n" "no PID file ($PIDFILE) - maybe not running"
- fi
-;;
-restart)
- $0 stop
- $0 start
-;;
-
-*)
- echo "Usage: $0 {status|start|stop|restart}"
- exit 1
-esac
-
diff --git a/conf/docker-dcm/cfg/dcm/test_install.sh b/conf/docker-dcm/cfg/dcm/test_install.sh
deleted file mode 100755
index 3026ceb9fa5..00000000000
--- a/conf/docker-dcm/cfg/dcm/test_install.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-cp /etc/dcm/rq-init-d /etc/init.d/rq
-cp /etc/dcm/lighttpd-conf-dcm /etc/lighttpd/lighttpd.conf
-cp /etc/dcm/lighttpd-modules-dcm /etc/lighttpd/modules.conf
-cp /etc/dcm/dcm-rssh.conf /etc/rssh.conf
-
diff --git a/conf/docker-dcm/cfg/rsal/entrypoint-rsal.sh b/conf/docker-dcm/cfg/rsal/entrypoint-rsal.sh
deleted file mode 100755
index 92466c3bd4b..00000000000
--- a/conf/docker-dcm/cfg/rsal/entrypoint-rsal.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh
-
-#/usr/bin/rsync --no-detach --daemon --config /etc/rsyncd.conf
-/usr/bin/rsync --daemon --config /etc/rsyncd.conf
-lighttpd -D -f /etc/lighttpd/lighttpd.conf
diff --git a/conf/docker-dcm/cfg/rsal/lighttpd-modules.conf b/conf/docker-dcm/cfg/rsal/lighttpd-modules.conf
deleted file mode 100644
index cdb1438af82..00000000000
--- a/conf/docker-dcm/cfg/rsal/lighttpd-modules.conf
+++ /dev/null
@@ -1,174 +0,0 @@
-#######################################################################
-##
-## ansible managed
-#
-## Modules to load
-## -----------------
-##
-## at least mod_access and mod_accesslog should be loaded
-## all other module should only be loaded if really neccesary
-##
-## - saves some time
-## - saves memory
-##
-## the default module set contains:
-##
-## "mod_indexfile", "mod_dirlisting", "mod_staticfile"
-##
-## you dont have to include those modules in your list
-##
-## Modules, which are pulled in via conf.d/*.conf
-##
-## NOTE: the order of modules is important.
-##
-## - mod_accesslog -> conf.d/access_log.conf
-## - mod_compress -> conf.d/compress.conf
-## - mod_status -> conf.d/status.conf
-## - mod_webdav -> conf.d/webdav.conf
-## - mod_cml -> conf.d/cml.conf
-## - mod_evhost -> conf.d/evhost.conf
-## - mod_simple_vhost -> conf.d/simple_vhost.conf
-## - mod_mysql_vhost -> conf.d/mysql_vhost.conf
-## - mod_trigger_b4_dl -> conf.d/trigger_b4_dl.conf
-## - mod_userdir -> conf.d/userdir.conf
-## - mod_rrdtool -> conf.d/rrdtool.conf
-## - mod_ssi -> conf.d/ssi.conf
-## - mod_cgi -> conf.d/cgi.conf
-## - mod_scgi -> conf.d/scgi.conf
-## - mod_fastcgi -> conf.d/fastcgi.conf
-## - mod_proxy -> conf.d/proxy.conf
-## - mod_secdownload -> conf.d/secdownload.conf
-## - mod_expire -> conf.d/expire.conf
-##
-
-server.modules = (
- "mod_access",
-# "mod_alias",
-# "mod_auth",
-# "mod_evasive",
-# "mod_redirect",
-# "mod_rewrite",
-# "mod_setenv",
-# "mod_usertrack",
-)
-
-##
-#######################################################################
-
-#######################################################################
-##
-## Config for various Modules
-##
-
-##
-## mod_ssi
-##
-#include "conf.d/ssi.conf"
-
-##
-## mod_status
-##
-#include "conf.d/status.conf"
-
-##
-## mod_webdav
-##
-#include "conf.d/webdav.conf"
-
-##
-## mod_compress
-##
-#include "conf.d/compress.conf"
-
-##
-## mod_userdir
-##
-#include "conf.d/userdir.conf"
-
-##
-## mod_magnet
-##
-#include "conf.d/magnet.conf"
-
-##
-## mod_cml
-##
-#include "conf.d/cml.conf"
-
-##
-## mod_rrdtool
-##
-#include "conf.d/rrdtool.conf"
-
-##
-## mod_proxy
-##
-#include "conf.d/proxy.conf"
-
-##
-## mod_expire
-##
-#include "conf.d/expire.conf"
-
-##
-## mod_secdownload
-##
-#include "conf.d/secdownload.conf"
-
-##
-#######################################################################
-
-#######################################################################
-##
-## CGI modules
-##
-
-##
-## SCGI (mod_scgi)
-##
-#include "conf.d/scgi.conf"
-
-##
-## FastCGI (mod_fastcgi)
-##
-#include "conf.d/fastcgi.conf"
-
-##
-## plain old CGI (mod_cgi)
-##
-include "conf.d/cgi.conf"
-
-##
-#######################################################################
-
-#######################################################################
-##
-## VHost Modules
-##
-## Only load ONE of them!
-## ========================
-##
-
-##
-## You can use conditionals for vhosts aswell.
-##
-## see http://www.lighttpd.net/documentation/configuration.html
-##
-
-##
-## mod_evhost
-##
-#include "conf.d/evhost.conf"
-
-##
-## mod_simple_vhost
-##
-#include "conf.d/simple_vhost.conf"
-
-##
-## mod_mysql_vhost
-##
-#include "conf.d/mysql_vhost.conf"
-
-##
-#######################################################################
diff --git a/conf/docker-dcm/cfg/rsal/lighttpd.conf b/conf/docker-dcm/cfg/rsal/lighttpd.conf
deleted file mode 100644
index 5874d60eb48..00000000000
--- a/conf/docker-dcm/cfg/rsal/lighttpd.conf
+++ /dev/null
@@ -1,43 +0,0 @@
-## lighttpd configuration customized for RSAL; centos7
-
-# refuse connections not from frontend or localhost
-# DO NOT HAVE THIS OPEN TO THE WORLD!!!
-#$HTTP["remoteip"] !~ "192.168.2.2|127.0.0.1" {
-#url.access-deny = ("")
-#}
-server.breakagelog = "/var/log/lighttpd/breakage.log"
-
-#######################################################################
-##
-## Some Variable definition which will make chrooting easier.
-##
-## if you add a variable here. Add the corresponding variable in the
-## chroot example aswell.
-##
-var.log_root = "/var/log/lighttpd"
-var.server_root = "/opt/rsal/api"
-var.state_dir = "/var/run"
-var.home_dir = "/var/lib/lighttpd"
-var.conf_dir = "/etc/lighttpd"
-
-var.cache_dir = "/var/cache/lighttpd"
-var.socket_dir = home_dir + "/sockets"
-include "modules.conf"
-server.port = 80
-server.use-ipv6 = "disable"
-server.username = "lighttpd"
-server.groupname = "lighttpd"
-server.document-root = server_root
-server.pid-file = state_dir + "/lighttpd.pid"
-server.errorlog = log_root + "/error.log"
-include "conf.d/access_log.conf"
-include "conf.d/debug.conf"
-server.event-handler = "linux-sysepoll"
-server.network-backend = "linux-sendfile"
-server.stat-cache-engine = "simple"
-server.max-connections = 1024
-static-file.exclude-extensions = ( ".php", ".pl", ".fcgi", ".scgi" )
-include "conf.d/mime.conf"
-include "conf.d/dirlisting.conf"
-server.follow-symlink = "enable"
-server.upload-dirs = ( "/var/tmp" )
diff --git a/conf/docker-dcm/cfg/rsal/rsyncd.conf b/conf/docker-dcm/cfg/rsal/rsyncd.conf
deleted file mode 100644
index 5a15ab28a12..00000000000
--- a/conf/docker-dcm/cfg/rsal/rsyncd.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-lock file=/var/run/rsync.lock
-log file=/var/log/rsyncd.log
-pid file=/var/log/rsyncd.pid
-
-[10.5072]
- path=/public/
- read only=yes
-
diff --git a/conf/docker-dcm/configure_dcm.sh b/conf/docker-dcm/configure_dcm.sh
deleted file mode 100755
index 5b65b0a0314..00000000000
--- a/conf/docker-dcm/configure_dcm.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/sh
-
-echo "dcm configs on dv side to be done"
-
-# in homage to dataverse traditions, reset to insecure "burrito" admin API key
-sudo -u postgres psql -c "update apitoken set tokenstring='burrito' where id=1;" dvndb
-sudo -u postgres psql -c "update authenticateduser set superuser='t' where id=1;" dvndb
-
-# dataverse configs for DCM
-curl -X PUT -d "SHA-1" "http://localhost:8080/api/admin/settings/:FileFixityChecksumAlgorithm"
-curl -X PUT "http://localhost:8080/api/admin/settings/:UploadMethods" -d "dcm/rsync+ssh"
-curl -X PUT "http://localhost:8080/api/admin/settings/:DataCaptureModuleUrl" -d "http://dcmsrv"
-
-# configure for RSAL downloads; but no workflows or RSAL yet
-curl -X PUT "http://localhost:8080/api/admin/settings/:DownloadMethods" -d "rsal/rsync"
-
-# publish root dataverse
-curl -X POST -H "X-Dataverse-key: burrito" "http://localhost:8080/api/dataverses/root/actions/:publish"
-
-# symlink `hold` volume
-mkdir -p /usr/local/glassfish4/glassfish/domains/domain1/files/
-ln -s /hold /usr/local/glassfish4/glassfish/domains/domain1/files/10.5072
-
-# need to set siteUrl
-cd /usr/local/glassfish4
-bin/asadmin create-jvm-options "\"-Ddataverse.siteUrl=http\://localhost\:8084\""
diff --git a/conf/docker-dcm/configure_rsal.sh b/conf/docker-dcm/configure_rsal.sh
deleted file mode 100755
index 5db43a34381..00000000000
--- a/conf/docker-dcm/configure_rsal.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/sh
-
-fn=rsal-workflow2.json
-# needs an actual IP (vs a hostname) for whitelist
-rsalip=`dig +short rsalsrv`
-
-# create workflow
-curl -s -X POST -H "Content-type: application/json" -d @${fn} "http://localhost:8080/api/admin/workflows"
-
-# put rsal on the whitelist
-curl -X PUT -d "127.0.0.1;${rsalip}" "http://localhost:8080/api/admin/workflows/ip-whitelist"
-
-# set workflow as default
-curl -X PUT -d "1" "http://localhost:8080/api/admin/workflows/default/PrePublishDataset"
-
-# local access path
-curl -X PUT -d "/hpc/storage" "http://localhost:8080/api/admin/settings/:LocalDataAccessPath"
-
-# storage sites
-curl -X POST -H "Content-type: application/json" --upload-file site-primary.json "http://localhost:8080/api/admin/storageSites"
-curl -X POST -H "Content-type: application/json" --upload-file site-remote.json "http://localhost:8080/api/admin/storageSites"
diff --git a/conf/docker-dcm/create.bash b/conf/docker-dcm/create.bash
deleted file mode 100755
index 58ae6e61dc7..00000000000
--- a/conf/docker-dcm/create.bash
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env bash
-
-
-# user creates dataset
-k_d=burrito
-dv_d=root
-h=http://dvsrv
-
-fn=dataset.json
-#dset_id=`curl -s -H "X-Dataverse-key: $k_d" -X POST --upload-file $fn $h/api/dataverses/$dv_d/datasets | jq .data.id`
-r=`curl -s -H "X-Dataverse-key: $k_d" -X POST --upload-file $fn $h/api/dataverses/$dv_d/datasets`
-echo $r
-dset_id=`echo $r | jq .data.id`
-echo "dataset created with id: $dset_id"
-
-if [ "null" == "${dset_id}" ]; then
- echo "error - no dataset id from create command"
- exit 1
-fi
-echo "dataset created; internal/db id: ${dset_id}"
-
-
diff --git a/conf/docker-dcm/dataset.json b/conf/docker-dcm/dataset.json
deleted file mode 100644
index fb1b734ed40..00000000000
--- a/conf/docker-dcm/dataset.json
+++ /dev/null
@@ -1,126 +0,0 @@
-{
- "datasetVersion": {
- "metadataBlocks": {
- "citation": {
- "displayName": "Citation Metadata",
- "fields": [
- {
- "typeName": "title",
- "multiple": false,
- "typeClass": "primitive",
- "value": "DCM test dataset"
- },
- {
- "typeName": "productionDate",
- "multiple": false,
- "typeClass": "primitive",
- "value": "2017-04-01"
- },
- {
- "typeName": "dsDescription",
- "multiple": true,
- "typeClass": "compound",
- "value": [
- {
- "dsDescriptionValue": {
- "typeName": "dsDescriptionValue",
- "multiple": false,
- "typeClass": "primitive",
- "value": "this would normally be a dataset large enough to require a DCM"
- }
- }
- ]
- },
- {
- "typeName": "depositor",
- "multiple": false,
- "typeClass": "primitive",
- "value": "Doc, Bob"
- },
- {
- "typeName": "producer",
- "multiple": true,
- "typeClass": "compound",
- "value": [
- {
- "producerName": {
- "typeName": "producerName",
- "multiple": false,
- "typeClass": "primitive",
- "value": "Prof, Arthor"
- },
- "producerAffiliation": {
- "typeName": "producerAffiliation",
- "multiple": false,
- "typeClass": "primitive",
- "value": "LibraScholar"
- }
- }
- ]
- },
- {
- "typeName": "author",
- "multiple": true,
- "typeClass": "compound",
- "value": [
- {
- "authorName": {
- "typeName": "authorName",
- "multiple": false,
- "typeClass": "primitive",
- "value": "Student, Carol"
- }
- ,
- "authorAffiliation": {
- "typeName": "authorAffiliation",
- "multiple": false,
- "typeClass": "primitive",
- "value": "LibraScholar"
- }
- },
- {
- "authorName": {
- "typeName": "authorName",
- "multiple": false,
- "typeClass": "primitive",
- "value": "Doc, Bob"
- }
- ,
- "authorAffiliation": {
- "typeName": "authorAffiliation",
- "multiple": false,
- "typeClass": "primitive",
- "value": "LibraScholar"
- }
- }
-
- ]
- },
- {
- "typeName": "datasetContact",
- "multiple": true,
- "typeClass": "compound",
- "value": [
- {
- "datasetContactEmail": {
- "typeName": "datasetContactEmail",
- "multiple": false,
- "typeClass": "primitive",
- "value": "dsContact@mailinator.com"
- }
- }
- ]
- },
- {
- "typeName": "subject",
- "multiple": true,
- "typeClass": "controlledVocabulary",
- "value": [
- "Medicine, Health and Life Sciences"
- ]
- }
- ]
- }
- }
- }
-}
diff --git a/conf/docker-dcm/dcmsrv.dockerfile b/conf/docker-dcm/dcmsrv.dockerfile
deleted file mode 100644
index 9989fa3a89d..00000000000
--- a/conf/docker-dcm/dcmsrv.dockerfile
+++ /dev/null
@@ -1,21 +0,0 @@
-# build from repo root
-FROM centos:6
-RUN yum install -y epel-release
-ARG RPMFILE=dcm-0.5-0.noarch.rpm
-COPY ${RPMFILE} /tmp/
-COPY cfg/dcm/bashrc /root/.bashrc
-COPY cfg/dcm/test_install.sh /root/
-RUN yum localinstall -y /tmp/${RPMFILE}
-RUN pip install -r /opt/dcm/requirements.txt
-RUN pip install awscli==1.15.75
-run export PATH=~/.local/bin:$PATH
-RUN /root/test_install.sh
-COPY cfg/dcm/rq-init-d /etc/init.d/rq
-RUN useradd glassfish
-COPY cfg/dcm/entrypoint-dcm.sh /
-COPY cfg/dcm/healthcheck-dcm.sh /
-EXPOSE 80
-EXPOSE 22
-VOLUME /hold
-HEALTHCHECK CMD /healthcheck-dcm.sh
-CMD ["/entrypoint-dcm.sh"]
diff --git a/conf/docker-dcm/docker-compose.yml b/conf/docker-dcm/docker-compose.yml
deleted file mode 100644
index 49d4467d349..00000000000
--- a/conf/docker-dcm/docker-compose.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-# initial docker-compose file for combined Dataverse and DCM with shared filesystem
-
-version: '3'
-
-services:
- dcmsrv:
- build:
- context: .
- dockerfile: dcmsrv.dockerfile
- container_name: dcmsrv
- volumes:
- - hold:/hold
- rsalsrv:
- build:
- context: .
- dockerfile: rsalsrv.dockerfile
- container_name: rsalsrv
-# image: rsalrepo_rsal
- volumes:
- - hold:/hold
- - ./:/mnt
- environment:
- DV_HOST: http://dvsrv:8080
- DV_APIKEY: burrito
- ports:
- - "8889:80"
- - "873:873"
- dvsrv:
- build:
- context: .
- dockerfile: dv0dcm.dockerfile
- container_name: dvsrv
- volumes:
- - hold:/hold
- - ./:/mnt
- ports:
- - "8083:8080"
- - "8084:80"
- client:
- build:
- context: .
- dockerfile: c6client.dockerfile
- command: sleep infinity
- container_name: dcm_client
- volumes:
- - ./:/mnt
-
-volumes:
- hold:
-
diff --git a/conf/docker-dcm/dv0dcm.dockerfile b/conf/docker-dcm/dv0dcm.dockerfile
deleted file mode 100644
index 021534c8978..00000000000
--- a/conf/docker-dcm/dv0dcm.dockerfile
+++ /dev/null
@@ -1,7 +0,0 @@
-# dv0 assumed to be image name for docker-aio
-FROM dv0
-RUN yum install -y bind-utils
-COPY configure_dcm.sh /opt/dv/
-COPY configure_rsal.sh /opt/dv/
-COPY rsal-workflow2.json site-primary.json site-remote.json /opt/dv/
-VOLUME /hold
diff --git a/conf/docker-dcm/get_transfer.bash b/conf/docker-dcm/get_transfer.bash
deleted file mode 100755
index 42080f536e1..00000000000
--- a/conf/docker-dcm/get_transfer.bash
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env bash
-
-# user gets transfer script
-
-dset_id=$1
-if [ -z "$dset_id" ]; then
- echo "no dataset id specified, bailing out"
- exit 1
-fi
-
-k_d=burrito
-dv_d=root
-
-h=http://dvsrv
-
-#get upload script from DCM
-wget --header "X-Dataverse-key: ${k_d}" ${h}/api/datasets/${dset_id}/dataCaptureModule/rsync -O upload-${dset_id}.bash
-
-
diff --git a/conf/docker-dcm/publish_major.bash b/conf/docker-dcm/publish_major.bash
deleted file mode 100755
index 6a3fd1288ca..00000000000
--- a/conf/docker-dcm/publish_major.bash
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env bash
-
-# publish dataset based on database id
-
-dset_id=$1
-if [ -z "$dset_id" ]; then
- echo "no dataset id specified, bailing out"
- exit 1
-fi
-
-k_d=burrito
-
-h=http://dvsrv
-
-curl -X POST -H "X-Dataverse-key: ${k_d}" "${h}/api/datasets/${dset_id}/actions/:publish?type=major"
-
-
diff --git a/conf/docker-dcm/readme.md b/conf/docker-dcm/readme.md
deleted file mode 100644
index 3e6a15e61d6..00000000000
--- a/conf/docker-dcm/readme.md
+++ /dev/null
@@ -1,26 +0,0 @@
-This docker-compose setup is intended for use in development and small-scale evaluation, and potentially to serve as an example of a working (although not production-security-level) configuration.
-
-Setup:
-
-- build docker-aio image with name dv0 as described in `../docker-aio` (don't start up the docker image or run setupIT.bash)
-- work in the `conf/docker-dcm` directory for below commands
-- download/prepare dependencies: `./0prep.sh`
-- build dcm/dv0dcm images with docker-compose: `docker-compose -f docker-compose.yml build`
-- start containers: `docker-compose -f docker-compose.yml up -d`
-- wait for container to show "healthy" (aka - `docker ps`), then run dataverse app installation: `docker exec dvsrv /opt/dv/install.bash`
-- for development, you probably want to use the `FAKE` DOI provider: `docker exec -it dvsrv /opt/dv/configure_doi.bash`
-- configure dataverse application to use DCM: `docker exec -it dvsrv /opt/dv/configure_dcm.sh`
-- configure dataverse application to use RSAL (if desired): `docker exec -it dvsrv /opt/dv/configure_rsal.sh`
-
-Operation:
-The dataverse installation is accessible at `http://localhost:8084`.
-The `dcm_client` container is intended to be used for executing transfer scripts, and `conf/docker-dcm` is available at `/mnt` inside the container; this container can be accessed with `docker exec -it dcm_client bash`.
-The DCM cron job is NOT configured here; for development purposes the DCM checks can be run manually with `docker exec -it dcmsrv /opt/dcm/scn/post_upload.bash`.
-The RSAL cron job is similarly NOT configured; for development purposes `docker exec -it rsalsrv /opt/rsal/scn/pub.py` can be run manually.
-
-
-Cleanup:
-- shutdown/cleanup `docker-compose -f docker-compose.yml down -v`
-
-For reference, this configuration was working with docker 17.09 / docker-compose 1.16.
-
diff --git a/conf/docker-dcm/rsal-workflow2.json b/conf/docker-dcm/rsal-workflow2.json
deleted file mode 100644
index 322d3ecbcf7..00000000000
--- a/conf/docker-dcm/rsal-workflow2.json
+++ /dev/null
@@ -1,31 +0,0 @@
-{
- "name": "RSAL file move for publication",
- "steps": [
- {
- "provider":":internal",
- "stepType":"log",
- "parameters": {
- "message": "Pre-http request"
- }
- },
- {
- "provider":":internal",
- "stepType":"http/sr",
- "parameters": {
- "url":"http://rsalsrv/rr.py",
- "method":"POST",
- "contentType":"text/plain",
- "body":"${invocationId}\ndataset.id=${dataset.id}\ndataset.identifier=${dataset.identifier}\ndataset.globalId=${dataset.globalId}",
- "expectedResponse":"OK.*",
- "rollbackMethod":"DELETE"
- }
- },
- {
- "provider":":internal",
- "stepType":"log",
- "parameters": {
- "message": "Post-http request"
- }
- }
- ]
-}
diff --git a/conf/docker-dcm/rsalsrv.dockerfile b/conf/docker-dcm/rsalsrv.dockerfile
deleted file mode 100644
index 844432afe6b..00000000000
--- a/conf/docker-dcm/rsalsrv.dockerfile
+++ /dev/null
@@ -1,20 +0,0 @@
-FROM centos:7
-ARG RPMFILE=rsal-0.1-0.noarch.rpm
-RUN yum update; yum install -y epel-release
-COPY ${RPMFILE} /tmp/
-RUN yum localinstall -y /tmp/${RPMFILE}
-COPY cfg/rsal/rsyncd.conf /etc/rsyncd.conf
-COPY cfg/rsal/entrypoint-rsal.sh /entrypoint.sh
-COPY cfg/rsal/lighttpd-modules.conf /etc/lighttpd/modules.conf
-COPY cfg/rsal/lighttpd.conf /etc/lighttpd/lighttpd.conf
-RUN mkdir -p /public/FK2
-RUN pip2 install -r /opt/rsal/scn/requirements.txt
-#COPY doc/testdata/ /hold/
-ARG DV_HOST=http://dv_srv:8080
-ARG DV_API_KEY=burrito
-ENV DV_HOST ${DV_HOST}
-ENV DV_API_KEY ${DV_API_KEY}
-EXPOSE 873
-EXPOSE 80
-HEALTHCHECK CMD curl --fail http://localhost/hw.py || exit 1
-CMD ["/entrypoint.sh"]
diff --git a/conf/docker-dcm/site-primary.json b/conf/docker-dcm/site-primary.json
deleted file mode 100644
index 35b217edffd..00000000000
--- a/conf/docker-dcm/site-primary.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "hostname": "rsalsrv",
- "name": "LibraScholar University",
- "primaryStorage": true,
- "transferProtocols": "rsync,posix"
-}
diff --git a/conf/docker-dcm/site-remote.json b/conf/docker-dcm/site-remote.json
deleted file mode 100644
index d47c3ef4dda..00000000000
--- a/conf/docker-dcm/site-remote.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "hostname": "remote.libra.research",
- "name": "LibraResearch Institute",
- "primaryStorage": false,
- "transferProtocols": "rsync"
-}
diff --git a/conf/jhove/jhove.conf b/conf/jhove/jhove.conf
index 5134ae0f81a..971c60acfaa 100644
--- a/conf/jhove/jhove.conf
+++ b/conf/jhove/jhove.conf
@@ -3,7 +3,7 @@
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://hul.harvard.edu/ois/xml/ns/jhove/jhoveConfig"
xsi:schemaLocation="http://hul.harvard.edu/ois/xml/ns/jhove/jhoveConfig
- file:///usr/local/payara5/glassfish/domains/domain1/config/jhoveConfig.xsd">
+ file:///usr/local/payara6/glassfish/domains/domain1/config/jhoveConfig.xsd">
 <jhoveHome>/usr/local/src/jhove</jhoveHome>
 <defaultEncoding>utf-8</defaultEncoding>
 <tempDirectory>/tmp</tempDirectory>
diff --git a/doc/release-notes/8305-payara6-ee10-v3.md b/doc/release-notes/8305-payara6-ee10-v3.md
new file mode 100644
index 00000000000..94369e0211f
--- /dev/null
+++ b/doc/release-notes/8305-payara6-ee10-v3.md
@@ -0,0 +1,13 @@
+Payara has been updated from version 5 to 6.
+
+Developers, you are encouraged to upgrade to Payara 6 immediately.
+
+Sysadmins, instructions on how to upgrade production installations will be written as part of https://github.com/IQSS/dataverse/issues/9340
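+
+As a quick sanity check after switching, you can confirm that the new layout starts cleanly. This is a minimal sketch; the `/usr/local/payara6` prefix matches the paths used elsewhere in this PR, so adjust it to your install location.
+
+```shell
+# minimal sketch: assumes Payara 6 was unzipped to /usr/local/payara6
+/usr/local/payara6/bin/asadmin start-domain domain1
+/usr/local/payara6/bin/asadmin list-domains
+```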
diff --git a/doc/sphinx-guides/source/_static/admin/counter-processor-config.yaml b/doc/sphinx-guides/source/_static/admin/counter-processor-config.yaml
index 4f338905751..26144544d9e 100644
--- a/doc/sphinx-guides/source/_static/admin/counter-processor-config.yaml
+++ b/doc/sphinx-guides/source/_static/admin/counter-processor-config.yaml
@@ -1,8 +1,8 @@
# currently no other option but to have daily logs and have year-month-day format in the name with
# 4-digit year and 2-digit month and day
-# /usr/local/payara5/glassfish/domains/domain1/logs/counter_2019-01-11.log
+# /usr/local/payara6/glassfish/domains/domain1/logs/counter_2019-01-11.log
#log_name_pattern: sample_logs/counter_(yyyy-mm-dd).log
-log_name_pattern: /usr/local/payara5/glassfish/domains/domain1/logs/mdc/counter_(yyyy-mm-dd).log
+log_name_pattern: /usr/local/payara6/glassfish/domains/domain1/logs/mdc/counter_(yyyy-mm-dd).log
# path_types regular expressions allow matching to classify page urls as either an investigation or request
# based on specific URL structure for your system.
diff --git a/doc/sphinx-guides/source/_static/installation/files/etc/init.d/payara.init.root b/doc/sphinx-guides/source/_static/installation/files/etc/init.d/payara.init.root
index 1de94331523..b9ef9960318 100755
--- a/doc/sphinx-guides/source/_static/installation/files/etc/init.d/payara.init.root
+++ b/doc/sphinx-guides/source/_static/installation/files/etc/init.d/payara.init.root
@@ -4,7 +4,7 @@
set -e
-ASADMIN=/usr/local/payara5/bin/asadmin
+ASADMIN=/usr/local/payara6/bin/asadmin
case "$1" in
start)
diff --git a/doc/sphinx-guides/source/_static/installation/files/etc/init.d/payara.init.service b/doc/sphinx-guides/source/_static/installation/files/etc/init.d/payara.init.service
index 7c457e615d8..19bb190e740 100755
--- a/doc/sphinx-guides/source/_static/installation/files/etc/init.d/payara.init.service
+++ b/doc/sphinx-guides/source/_static/installation/files/etc/init.d/payara.init.service
@@ -3,7 +3,7 @@
# description: Payara App Server
set -e
-ASADMIN=/usr/local/payara5/bin/asadmin
+ASADMIN=/usr/local/payara6/bin/asadmin
APP_SERVER_USER=dataverse
case "$1" in
diff --git a/doc/sphinx-guides/source/_static/installation/files/etc/systemd/payara.service b/doc/sphinx-guides/source/_static/installation/files/etc/systemd/payara.service
index c8c82f6d6b2..c8efcb9c6f9 100644
--- a/doc/sphinx-guides/source/_static/installation/files/etc/systemd/payara.service
+++ b/doc/sphinx-guides/source/_static/installation/files/etc/systemd/payara.service
@@ -4,9 +4,9 @@ After = syslog.target network.target
[Service]
Type = forking
-ExecStart = /usr/bin/java -jar /usr/local/payara5/glassfish/lib/client/appserver-cli.jar start-domain
-ExecStop = /usr/bin/java -jar /usr/local/payara5/glassfish/lib/client/appserver-cli.jar stop-domain
-ExecReload = /usr/bin/java -jar /usr/local/payara5/glassfish/lib/client/appserver-cli.jar restart-domain
+ExecStart = /usr/bin/java -jar /usr/local/payara6/glassfish/lib/client/appserver-cli.jar start-domain
+ExecStop = /usr/bin/java -jar /usr/local/payara6/glassfish/lib/client/appserver-cli.jar stop-domain
+ExecReload = /usr/bin/java -jar /usr/local/payara6/glassfish/lib/client/appserver-cli.jar restart-domain
User=dataverse
LimitNOFILE=32768
Environment="LANG=en_US.UTF-8"
diff --git a/doc/sphinx-guides/source/_static/installation/files/usr/local/payara5/glassfish/domains/domain1/config/logging.properties b/doc/sphinx-guides/source/_static/installation/files/usr/local/payara5/glassfish/domains/domain1/config/logging.properties
deleted file mode 100644
index 4054c794452..00000000000
--- a/doc/sphinx-guides/source/_static/installation/files/usr/local/payara5/glassfish/domains/domain1/config/logging.properties
+++ /dev/null
@@ -1,166 +0,0 @@
-#
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
-#
-# Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
-#
-# The contents of this file are subject to the terms of either the GNU
-# General Public License Version 2 only ("GPL") or the Common Development
-# and Distribution License("CDDL") (collectively, the "License"). You
-# may not use this file except in compliance with the License. You can
-# obtain a copy of the License at
-# https://glassfish.dev.java.net/public/CDDL+GPL_1_1.html
-# or packager/legal/LICENSE.txt. See the License for the specific
-# language governing permissions and limitations under the License.
-#
-# When distributing the software, include this License Header Notice in each
-# file and include the License file at packager/legal/LICENSE.txt.
-#
-# GPL Classpath Exception:
-# Oracle designates this particular file as subject to the "Classpath"
-# exception as provided by Oracle in the GPL Version 2 section of the License
-# file that accompanied this code.
-#
-# Modifications:
-# If applicable, add the following below the License Header, with the fields
-# enclosed by brackets [] replaced by your own identifying information:
-# "Portions Copyright [year] [name of copyright owner]"
-#
-# Contributor(s):
-# If you wish your version of this file to be governed by only the CDDL or
-# only the GPL Version 2, indicate your decision by adding "[Contributor]
-# elects to include this software in this distribution under the [CDDL or GPL
-# Version 2] license." If you don't indicate a single choice of license, a
-# recipient has the option to distribute your version of this file under
-# either the CDDL, the GPL Version 2 or to extend the choice of license to
-# its licensees as provided above. However, if you add GPL Version 2 code
-# and therefore, elected the GPL Version 2 license, then the option applies
-# only if the new code is made subject to such option by the copyright
-# holder.
-#
-# Portions Copyright [2016-2021] [Payara Foundation and/or its affiliates]
-
-#GlassFish logging.properties list
-#Update June 13 2012
-
-#All attributes details
-handlers=java.util.logging.ConsoleHandler
-handlerServices=com.sun.enterprise.server.logging.GFFileHandler,com.sun.enterprise.server.logging.SyslogHandler
-java.util.logging.ConsoleHandler.formatter=com.sun.enterprise.server.logging.UniformLogFormatter
-java.util.logging.FileHandler.count=1
-java.util.logging.FileHandler.formatter=java.util.logging.XMLFormatter
-java.util.logging.FileHandler.limit=50000
-java.util.logging.FileHandler.pattern=%h/java%u.log
-com.sun.enterprise.server.logging.GFFileHandler.compressOnRotation=false
-com.sun.enterprise.server.logging.GFFileHandler.excludeFields=
-com.sun.enterprise.server.logging.GFFileHandler.file=${com.sun.aas.instanceRoot}/logs/server.log
-com.sun.enterprise.server.logging.GFFileHandler.flushFrequency=1
-com.sun.enterprise.server.logging.GFFileHandler.formatter=com.sun.enterprise.server.logging.ODLLogFormatter
-com.sun.enterprise.server.logging.GFFileHandler.level=ALL
-com.sun.enterprise.server.logging.GFFileHandler.logStandardStreams=true
-com.sun.enterprise.server.logging.GFFileHandler.logtoConsole=false
-com.sun.enterprise.server.logging.GFFileHandler.logtoFile=true
-com.sun.enterprise.server.logging.GFFileHandler.maxHistoryFiles=0
-com.sun.enterprise.server.logging.GFFileHandler.multiLineMode=true
-com.sun.enterprise.server.logging.GFFileHandler.retainErrorsStasticsForHours=0
-com.sun.enterprise.server.logging.GFFileHandler.rotationLimitInBytes=2000000
-com.sun.enterprise.server.logging.GFFileHandler.rotationOnDateChange=false
-com.sun.enterprise.server.logging.GFFileHandler.rotationTimelimitInMinutes=0
-com.sun.enterprise.server.logging.SyslogHandler.level=ALL
-com.sun.enterprise.server.logging.SyslogHandler.useSystemLogging=false
-log4j.logger.org.hibernate.validator.util.Version=warn
-com.sun.enterprise.server.logging.UniformLogFormatter.ansiColor=true
-
-#Payara Notification logging properties
-fish.payara.enterprise.server.logging.PayaraNotificationFileHandler.compressOnRotation=false
-fish.payara.enterprise.server.logging.PayaraNotificationFileHandler.file=${com.sun.aas.instanceRoot}/logs/notification.log
-fish.payara.enterprise.server.logging.PayaraNotificationFileHandler.formatter=com.sun.enterprise.server.logging.ODLLogFormatter
-fish.payara.enterprise.server.logging.PayaraNotificationFileHandler.logtoFile=true
-fish.payara.enterprise.server.logging.PayaraNotificationFileHandler.maxHistoryFiles=0
-fish.payara.enterprise.server.logging.PayaraNotificationFileHandler.rotationLimitInBytes=2000000
-fish.payara.enterprise.server.logging.PayaraNotificationFileHandler.rotationOnDateChange=false
-fish.payara.enterprise.server.logging.PayaraNotificationFileHandler.rotationTimelimitInMinutes=0
-fish.payara.deprecated.jsonlogformatter.underscoreprefix=false
-
-#All log level details
-
-.level=INFO
-ShoalLogger.level=CONFIG
-com.hazelcast.level=WARNING
-java.util.logging.ConsoleHandler.level=FINEST
-javax.enterprise.resource.corba.level=INFO
-javax.enterprise.resource.javamail.level=INFO
-javax.enterprise.resource.jdo.level=INFO
-javax.enterprise.resource.jms.level=INFO
-javax.enterprise.resource.jta.level=INFO
-javax.enterprise.resource.resourceadapter.level=INFO
-javax.enterprise.resource.sqltrace.level=FINE
-javax.enterprise.resource.webcontainer.jsf.application.level=INFO
-javax.enterprise.resource.webcontainer.jsf.config.level=INFO
-javax.enterprise.resource.webcontainer.jsf.context.level=INFO
-javax.enterprise.resource.webcontainer.jsf.facelets.level=INFO
-javax.enterprise.resource.webcontainer.jsf.lifecycle.level=INFO
-javax.enterprise.resource.webcontainer.jsf.managedbean.level=INFO
-javax.enterprise.resource.webcontainer.jsf.renderkit.level=INFO
-javax.enterprise.resource.webcontainer.jsf.resource.level=INFO
-javax.enterprise.resource.webcontainer.jsf.taglib.level=INFO
-javax.enterprise.resource.webcontainer.jsf.timing.level=INFO
-javax.enterprise.system.container.cmp.level=INFO
-javax.enterprise.system.container.ejb.level=INFO
-javax.enterprise.system.container.ejb.mdb.level=INFO
-javax.enterprise.system.container.web.level=INFO
-javax.enterprise.system.core.classloading.level=INFO
-javax.enterprise.system.core.config.level=INFO
-javax.enterprise.system.core.level=INFO
-javax.enterprise.system.core.security.level=INFO
-javax.enterprise.system.core.selfmanagement.level=INFO
-javax.enterprise.system.core.transaction.level=INFO
-javax.enterprise.system.level=INFO
-javax.enterprise.system.ssl.security.level=INFO
-javax.enterprise.system.tools.admin.level=INFO
-javax.enterprise.system.tools.backup.level=INFO
-javax.enterprise.system.tools.deployment.common.level=WARNING
-javax.enterprise.system.tools.deployment.dol.level=WARNING
-javax.enterprise.system.tools.deployment.level=INFO
-javax.enterprise.system.util.level=INFO
-javax.enterprise.system.webservices.registry.level=INFO
-javax.enterprise.system.webservices.rpc.level=INFO
-javax.enterprise.system.webservices.saaj.level=INFO
-javax.level=INFO
-javax.mail.level=INFO
-javax.org.glassfish.persistence.level=INFO
-org.apache.catalina.level=INFO
-org.apache.coyote.level=INFO
-org.apache.jasper.level=INFO
-org.eclipse.persistence.session.level=INFO
-org.glassfish.admingui.level=INFO
-org.glassfish.naming.level=INFO
-org.jvnet.hk2.osgiadapter.level=INFO
-
-javax.enterprise.resource.corba.level=INFO
-javax.enterprise.resource.jta.level=INFO
-javax.enterprise.system.webservices.saaj.level=INFO
-javax.enterprise.system.container.ejb.level=INFO
-javax.enterprise.system.container.ejb.mdb.level=INFO
-javax.enterprise.resource.javamail.level=INFO
-javax.enterprise.system.webservices.rpc.level=INFO
-javax.enterprise.system.container.web.level=INFO
-javax.enterprise.resource.jms.level=INFO
-javax.enterprise.system.webservices.registry.level=INFO
-javax.enterprise.resource.webcontainer.jsf.application.level=INFO
-javax.enterprise.resource.webcontainer.jsf.resource.level=INFO
-javax.enterprise.resource.webcontainer.jsf.config.level=INFO
-javax.enterprise.resource.webcontainer.jsf.context.level=INFO
-javax.enterprise.resource.webcontainer.jsf.facelets.level=INFO
-javax.enterprise.resource.webcontainer.jsf.lifecycle.level=INFO
-javax.enterprise.resource.webcontainer.jsf.managedbean.level=INFO
-javax.enterprise.resource.webcontainer.jsf.renderkit.level=INFO
-javax.enterprise.resource.webcontainer.jsf.taglib.level=INFO
-javax.enterprise.resource.webcontainer.jsf.timing.level=INFO
-javax.org.glassfish.persistence.level=INFO
-javax.enterprise.system.tools.backup.level=INFO
-javax.mail.level=INFO
-org.glassfish.admingui.level=INFO
-org.glassfish.naming.level=INFO
-org.eclipse.persistence.session.level=INFO
-javax.enterprise.system.tools.deployment.dol.level=WARNING
-javax.enterprise.system.tools.deployment.common.level=WARNING
diff --git a/doc/sphinx-guides/source/_static/util/clear_timer.sh b/doc/sphinx-guides/source/_static/util/clear_timer.sh
index 1d9966e4e07..641b2695084 100755
--- a/doc/sphinx-guides/source/_static/util/clear_timer.sh
+++ b/doc/sphinx-guides/source/_static/util/clear_timer.sh
@@ -8,7 +8,7 @@
# if you'd like to avoid that.
# directory where Payara is installed
-PAYARA_DIR=/usr/local/payara5
+PAYARA_DIR=/usr/local/payara6
# directory within Payara (defaults)
DV_DIR=${PAYARA_DIR}/glassfish/domains/domain1
diff --git a/doc/sphinx-guides/source/_static/util/counter_daily.sh b/doc/sphinx-guides/source/_static/util/counter_daily.sh
index a12439d9cf8..674972b18f2 100644
--- a/doc/sphinx-guides/source/_static/util/counter_daily.sh
+++ b/doc/sphinx-guides/source/_static/util/counter_daily.sh
@@ -1,7 +1,7 @@
#! /bin/bash
COUNTER_PROCESSOR_DIRECTORY="/usr/local/counter-processor-0.1.04"
-MDC_LOG_DIRECTORY="/usr/local/payara5/glassfish/domains/domain1/logs/mdc"
+MDC_LOG_DIRECTORY="/usr/local/payara6/glassfish/domains/domain1/logs/mdc"
# counter_daily.sh
diff --git a/doc/sphinx-guides/source/admin/harvestclients.rst b/doc/sphinx-guides/source/admin/harvestclients.rst
index da922459f46..59fc4dc2c64 100644
--- a/doc/sphinx-guides/source/admin/harvestclients.rst
+++ b/doc/sphinx-guides/source/admin/harvestclients.rst
@@ -35,8 +35,8 @@ For example:
.. code-block:: bash
- sudo touch /usr/local/payara5/glassfish/domains/domain1/logs/stopharvest_bigarchive.70916
- sudo chown dataverse /usr/local/payara5/glassfish/domains/domain1/logs/stopharvest_bigarchive.70916
+ sudo touch /usr/local/payara6/glassfish/domains/domain1/logs/stopharvest_bigarchive.70916
+ sudo chown dataverse /usr/local/payara6/glassfish/domains/domain1/logs/stopharvest_bigarchive.70916
Note: If the application server is stopped and restarted, any running harvesting jobs will be killed but may remain marked as in progress in the database. We thus recommend using the mechanism here to stop ongoing harvests prior to a server restart.
@@ -44,6 +44,6 @@ Note: If the application server is stopped and restarted, any running harvesting
What if a Run Fails?
~~~~~~~~~~~~~~~~~~~~
-Each harvesting client run logs a separate file per run to the app server's default logging directory (``/usr/local/payara5/glassfish/domains/domain1/logs/`` unless you've changed it). Look for filenames in the format ``harvest_TARGET_YYYY_MM_DD_timestamp.log`` to get a better idea of what's going wrong.
+Each harvesting client run logs a separate file per run to the app server's default logging directory (``/usr/local/payara6/glassfish/domains/domain1/logs/`` unless you've changed it). Look for filenames in the format ``harvest_TARGET_YYYY_MM_DD_timestamp.log`` to get a better idea of what's going wrong.
Note that you'll want to run a minimum of Dataverse Software 4.6, optimally 4.18 or beyond, for the best OAI-PMH interoperability.
diff --git a/doc/sphinx-guides/source/admin/make-data-count.rst b/doc/sphinx-guides/source/admin/make-data-count.rst
index ec6ff9a685b..fe32af6649a 100644
--- a/doc/sphinx-guides/source/admin/make-data-count.rst
+++ b/doc/sphinx-guides/source/admin/make-data-count.rst
@@ -72,10 +72,9 @@ Enable or Disable Display of Make Data Count Metrics
By default, when MDC logging is enabled (when ``:MDCLogPath`` is set), your Dataverse installation will display MDC metrics instead of its internal (legacy) metrics. You can avoid this (e.g. to collect MDC metrics for some period of time before starting to display them) by setting ``:DisplayMDCMetrics`` to false.
+The following discussion assumes ``:MDCLogPath`` has been set to ``/usr/local/payara6/glassfish/domains/domain1/logs/mdc``.
You can also decide to display MDC metrics along with Dataverse's traditional download counts from the time before MDC was enabled. To do this, set the :ref:`:MDCStartDate` to when you started MDC logging.
-The following discussion assumes ``:MDCLogPath`` has been set to ``/usr/local/payara5/glassfish/domains/domain1/logs/mdc``
-
Configure Counter Processor
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -104,7 +103,7 @@ Soon we will be setting up a cron job to run nightly but we start with a single
* If you are running Counter Processor for the first time in the middle of a month, you will need to create blank log files for the previous days, e.g.:
- * ``cd /usr/local/payara5/glassfish/domains/domain1/logs/mdc``
+ * ``cd /usr/local/payara6/glassfish/domains/domain1/logs/mdc``
* ``touch counter_2019-02-01.log``
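+
+If several days are missing, a small shell loop is less tedious than touching each file by hand. A minimal sketch, assuming the February 2019 example and the default MDC log directory used above:
+
+.. code-block:: bash
+
+   # create empty placeholder logs for Feb 1-10, 2019 (adjust the range as needed)
+   cd /usr/local/payara6/glassfish/domains/domain1/logs/mdc
+   for day in $(seq -w 1 10); do
+       touch "counter_2019-02-${day}.log"
+   done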
diff --git a/doc/sphinx-guides/source/admin/metadatacustomization.rst b/doc/sphinx-guides/source/admin/metadatacustomization.rst
index cac051ddb59..53bc32eca3d 100644
--- a/doc/sphinx-guides/source/admin/metadatacustomization.rst
+++ b/doc/sphinx-guides/source/admin/metadatacustomization.rst
@@ -413,8 +413,8 @@ Setting Up a Dev Environment for Testing
You have several options for setting up a dev environment for testing metadata block changes:
+- Docker: See :doc:`/container/index`.
- Vagrant: See the :doc:`/developers/tools` section of the Developer Guide.
-- docker-aio: See https://github.com/IQSS/dataverse/tree/develop/conf/docker-aio
- AWS deployment: See the :doc:`/developers/deployment` section of the Developer Guide.
- Full dev environment: See the :doc:`/developers/dev-environment` section of the Developer Guide.
diff --git a/doc/sphinx-guides/source/admin/troubleshooting.rst b/doc/sphinx-guides/source/admin/troubleshooting.rst
index 9f085ba90cd..acbdcaae17e 100644
--- a/doc/sphinx-guides/source/admin/troubleshooting.rst
+++ b/doc/sphinx-guides/source/admin/troubleshooting.rst
@@ -53,15 +53,13 @@ Long-Running Ingest Jobs Have Exhausted System Resources
Ingest is both CPU- and memory-intensive, and depending on your system resources and the size and format of tabular data files uploaded, may render your Dataverse installation unresponsive or nearly inoperable. It is possible to cancel these jobs by purging the ingest queue.
-``/usr/local/payara5/mq/bin/imqcmd -u admin query dst -t q -n DataverseIngest`` will query the DataverseIngest destination. The password, unless you have changed it, matches the username.
+``/usr/local/payara6/mq/bin/imqcmd -u admin query dst -t q -n DataverseIngest`` will query the DataverseIngest destination. The password, unless you have changed it, matches the username.
-``/usr/local/payara5/mq/bin/imqcmd -u admin purge dst -t q -n DataverseIngest`` will purge the DataverseIngest queue, and prompt for your confirmation.
+``/usr/local/payara6/mq/bin/imqcmd -u admin purge dst -t q -n DataverseIngest`` will purge the DataverseIngest queue, and prompt for your confirmation.
Finally, list destinations to verify that the purge was successful:
-``/usr/local/payara5/mq/bin/imqcmd -u admin list dst``
-
-If you are still running Glassfish, substitute glassfish4 for payara5 above. If you have installed your Dataverse installation in some other location, adjust the above paths accordingly.
+``/usr/local/payara6/mq/bin/imqcmd -u admin list dst``
.. _troubleshooting-payara:
@@ -73,7 +71,7 @@ Payara
Finding the Payara Log File
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-``/usr/local/payara5/glassfish/domains/domain1/logs/server.log`` is the main place to look when you encounter problems (assuming you installed Payara in the default directory). Hopefully an error message has been logged. If there's a stack trace, it may be of interest to developers, especially they can trace line numbers back to a tagged version or commit. Send more of the stack trace (the entire file if possible) to developers who can help (see "Getting Help", below) and be sure to say which version of the Dataverse Software you have installed.
+``/usr/local/payara6/glassfish/domains/domain1/logs/server.log`` is the main place to look when you encounter problems (assuming you installed Payara in the default directory). Hopefully an error message has been logged. If there's a stack trace, it may be of interest to developers, especially if they can trace line numbers back to a tagged version or commit. Send more of the stack trace (the entire file if possible) to developers who can help (see "Getting Help", below) and be sure to say which version of the Dataverse Software you have installed.
.. _increase-payara-logging:
diff --git a/doc/sphinx-guides/source/container/dev-usage.rst b/doc/sphinx-guides/source/container/dev-usage.rst
index 3fbe55766d5..04c7eba7913 100644
--- a/doc/sphinx-guides/source/container/dev-usage.rst
+++ b/doc/sphinx-guides/source/container/dev-usage.rst
@@ -9,17 +9,7 @@ Please note! This Docker setup is not for production!
Quickstart
----------
-First, install Java 11 and Maven.
-
-After cloning the repo, try this:
-
-``mvn -Pct clean package docker:run``
-
-After some time you should be able to log in:
-
-- url: http://localhost:8080
-- username: dataverseAdmin
-- password: admin1
+See :ref:`container-dev-quickstart`.
Intro
-----
@@ -172,7 +162,7 @@ restart the application container:
Using ``docker container inspect dev_dataverse | grep Image`` you can verify the changed checksums.
-Using A Debugger
+Using a Debugger
----------------
The :doc:`base-image` enables usage of the `Java Debugging Wire Protocol `_
@@ -183,3 +173,8 @@ There are a lot of tutorials how to connect your IDE's debugger to a remote endp
as the endpoint. Here are links to the most common IDEs docs on remote debugging:
`Eclipse `_,
`IntelliJ `_
+
+Building Your Own Base Image
+----------------------------
+
+If you find yourself tasked with upgrading Payara, you will need to create your own base image before running the :ref:`container-dev-quickstart`. For instructions, see :doc:`base-image`.
diff --git a/doc/sphinx-guides/source/developers/big-data-support.rst b/doc/sphinx-guides/source/developers/big-data-support.rst
index b238a7623eb..04885571a01 100644
--- a/doc/sphinx-guides/source/developers/big-data-support.rst
+++ b/doc/sphinx-guides/source/developers/big-data-support.rst
@@ -173,6 +173,8 @@ See also :ref:`Globus settings <:GlobusBasicToken>`.
Data Capture Module (DCM)
-------------------------
+Please note: The DCM feature is deprecated.
+
Data Capture Module (DCM) is an experimental component that allows users to upload large datasets via rsync over ssh.
DCM was developed and tested using Glassfish but these docs have been updated with references to Payara.
@@ -209,7 +211,7 @@ The JSON that a DCM sends to your Dataverse installation on successful checksum
:language: json
- ``status`` - The valid strings to send are ``validation passed`` and ``validation failed``.
-- ``uploadFolder`` - This is the directory on disk where your Dataverse installation should attempt to find the files that a DCM has moved into place. There should always be a ``files.sha`` file and a least one data file. ``files.sha`` is a manifest of all the data files and their checksums. The ``uploadFolder`` directory is inside the directory where data is stored for the dataset and may have the same name as the "identifier" of the persistent id (DOI or Handle). For example, you would send ``"uploadFolder": "DNXV2H"`` in the JSON file when the absolute path to this directory is ``/usr/local/payara5/glassfish/domains/domain1/files/10.5072/FK2/DNXV2H/DNXV2H``.
+- ``uploadFolder`` - This is the directory on disk where your Dataverse installation should attempt to find the files that a DCM has moved into place. There should always be a ``files.sha`` file and at least one data file. ``files.sha`` is a manifest of all the data files and their checksums. The ``uploadFolder`` directory is inside the directory where data is stored for the dataset and may have the same name as the "identifier" of the persistent id (DOI or Handle). For example, you would send ``"uploadFolder": "DNXV2H"`` in the JSON file when the absolute path to this directory is ``/usr/local/payara6/glassfish/domains/domain1/files/10.5072/FK2/DNXV2H/DNXV2H``.
- ``totalSize`` - Your Dataverse installation will use this value to represent the total size in bytes of all the files in the "package" that's created. If 360 data files and one ``files.sha`` manifest file are in the ``uploadFolder``, this value is the sum of the 360 data files.
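+
+Putting those fields together, a successful validation message has roughly the following shape (the values below are illustrative only and the file name is made up for this example; in practice the DCM generates this JSON itself):
+
+.. code-block:: bash
+
+   # illustrative sketch of the checksum validation JSON a DCM sends
+   cat > checksum-validation-success.json <<'EOF'
+   {
+     "status": "validation passed",
+     "uploadFolder": "DNXV2H",
+     "totalSize": 1234567890
+   }
+   EOF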
@@ -231,9 +233,9 @@ Add Dataverse Installation settings to use mock (same as using DCM, noted above)
At this point you should be able to download a placeholder rsync script. Your Dataverse installation is then waiting for news from the DCM about if checksum validation has succeeded or not. First, you have to put files in place, which is usually the job of the DCM. You should substitute "X1METO" for the "identifier" of the dataset you create. You must also use the proper path for where you store files in your dev environment.
-- ``mkdir /usr/local/payara5/glassfish/domains/domain1/files/10.5072/FK2/X1METO``
-- ``mkdir /usr/local/payara5/glassfish/domains/domain1/files/10.5072/FK2/X1METO/X1METO``
-- ``cd /usr/local/payara5/glassfish/domains/domain1/files/10.5072/FK2/X1METO/X1METO``
+- ``mkdir /usr/local/payara6/glassfish/domains/domain1/files/10.5072/FK2/X1METO``
+- ``mkdir /usr/local/payara6/glassfish/domains/domain1/files/10.5072/FK2/X1METO/X1METO``
+- ``cd /usr/local/payara6/glassfish/domains/domain1/files/10.5072/FK2/X1METO/X1METO``
- ``echo "hello" > file1.txt``
- ``shasum file1.txt > files.sha``
@@ -248,104 +250,11 @@ The following low level command should only be used when troubleshooting the "im
``curl -H "X-Dataverse-key: $API_TOKEN" -X POST "$DV_BASE_URL/api/batch/jobs/import/datasets/files/$DATASET_DB_ID?uploadFolder=$UPLOAD_FOLDER&totalSize=$TOTAL_SIZE"``
-Steps to set up a DCM via Docker for Development
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If you need a fully operating DCM client for development purposes, these steps will guide you to setting one up. This includes steps to set up the DCM on S3 variant.
-
-Docker Image Set-up
-^^^^^^^^^^^^^^^^^^^
-
-See https://github.com/IQSS/dataverse/blob/develop/conf/docker-dcm/readme.md
-
-- Install docker if you do not have it
-
-Optional steps for setting up the S3 Docker DCM Variant
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-- Before: the default bucket for DCM to hold files in S3 is named test-dcm. It is coded into `post_upload_s3.bash` (line 30). Change to a different bucket if needed.
-- Also Note: With the new support for multiple file store in the Dataverse Software, DCM requires a store with id="s3" and DCM will only work with this store.
-
- - Add AWS bucket info to dcmsrv
- - Add AWS credentials to ``~/.aws/credentials``
-
- - ``[default]``
- - ``aws_access_key_id =``
- - ``aws_secret_access_key =``
-
-- Dataverse installation configuration (on dvsrv):
-
- - Set S3 as the storage driver
-
- - ``cd /opt/payara5/bin/``
- - ``./asadmin delete-jvm-options "\-Ddataverse.files.storage-driver-id=file"``
- - ``./asadmin create-jvm-options "\-Ddataverse.files.storage-driver-id=s3"``
- - ``./asadmin create-jvm-options "\-Ddataverse.files.s3.type=s3"``
- - ``./asadmin create-jvm-options "\-Ddataverse.files.s3.label=s3"``
-
-
- - Add AWS bucket info to your Dataverse installation
- - Add AWS credentials to ``~/.aws/credentials``
-
- - ``[default]``
- - ``aws_access_key_id =``
- - ``aws_secret_access_key =``
-
- - Also: set region in ``~/.aws/config`` to create a region file. Add these contents:
-
- - ``[default]``
- - ``region = us-east-1``
-
- - Add the S3 bucket names to your Dataverse installation
-
- - S3 bucket for your Dataverse installation
-
- - ``/usr/local/payara5/glassfish/bin/asadmin create-jvm-options "-Ddataverse.files.s3.bucket-name=iqsstestdcmbucket"``
-
- - S3 bucket for DCM (as your Dataverse installation needs to do the copy over)
-
- - ``/usr/local/payara5/glassfish/bin/asadmin create-jvm-options "-Ddataverse.files.dcm-s3-bucket-name=test-dcm"``
-
- - Set download method to be HTTP, as DCM downloads through S3 are over this protocol ``curl -X PUT "http://localhost:8080/api/admin/settings/:DownloadMethods" -d "native/http"``
-
-Using the DCM Docker Containers
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-For using these commands, you will need to connect to the shell prompt inside various containers (e.g. ``docker exec -it dvsrv /bin/bash``)
-
-- Create a dataset and download rsync upload script
-
- - connect to client container: ``docker exec -it dcm_client bash``
- - create dataset: ``cd /mnt ; ./create.bash`` ; this will echo the database ID to stdout
- - download transfer script: ``./get_transfer.bash $database_id_from_create_script``
- - execute the transfer script: ``bash ./upload-${database_id_from-create_script}.bash`` , and follow instructions from script.
-
-- Run script
-
- - e.g. ``bash ./upload-3.bash`` (``3`` being the database id from earlier commands in this example).
-
-- Manually run post upload script on dcmsrv
-
- - for posix implementation: ``docker exec -it dcmsrv /opt/dcm/scn/post_upload.bash``
- - for S3 implementation: ``docker exec -it dcmsrv /opt/dcm/scn/post_upload_s3.bash``
-
-Additional DCM docker development tips
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-- You can completely blow away all the docker images with these commands (including non DCM ones!)
- - ``docker-compose -f docmer-compose.yml down -v``
-
-- There are a few logs to tail
-
- - dvsrv : ``tail -n 2000 -f /opt/payara5/glassfish/domains/domain1/logs/server.log``
- - dcmsrv : ``tail -n 2000 -f /var/log/lighttpd/breakage.log``
- - dcmsrv : ``tail -n 2000 -f /var/log/lighttpd/access.log``
-
-- You may have to restart the app server domain occasionally to deal with memory filling up. If deployment is getting reallllllly slow, its a good time.
-
Repository Storage Abstraction Layer (RSAL)
-------------------------------------------
+Please note: The RSAL feature is deprecated.
+
Steps to set up a DCM via Docker for Development
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/sphinx-guides/source/developers/classic-dev-env.rst b/doc/sphinx-guides/source/developers/classic-dev-env.rst
new file mode 100755
index 00000000000..423f609dd2f
--- /dev/null
+++ b/doc/sphinx-guides/source/developers/classic-dev-env.rst
@@ -0,0 +1,266 @@
+=======================
+Classic Dev Environment
+=======================
+
+These are the old instructions we used for Dataverse 4 and 5. They should still work, but these days we favor running Dataverse in Docker as described in :doc:`dev-environment`.
+
+These instructions are purposefully opinionated and terse to help you get your development environment up and running as quickly as possible! Please note that familiarity with running commands from the terminal is assumed.
+
+.. contents:: |toctitle|
+ :local:
+
+Quick Start (Docker)
+--------------------
+
+The quickest way to get Dataverse running is in Docker as explained in the :doc:`../container/dev-usage` section of the Container Guide.
+
+
+Classic Dev Environment
+-----------------------
+
+Since before Docker existed, we have encouraged installing Dataverse and all its dependencies directly on your development machine, as described below. This can be thought of as the "classic" development environment for Dataverse.
+
+However, in 2023 we decided that we'd like to encourage all developers to start using Docker instead and opened https://github.com/IQSS/dataverse/issues/9616 to indicate that we plan to rewrite this page to recommend the use of Docker.
+
+There's nothing wrong with the classic instructions below and we don't plan to simply delete them. They are a valid alternative to running Dataverse in Docker. We will likely move them to another page.
+
+Set Up Dependencies
+-------------------
+
+Supported Operating Systems
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Mac OS X or Linux is required because the setup scripts assume the presence of standard Unix utilities.
+
+Windows is gaining support through Docker as described in the :doc:`windows` section.
+
+Install Java
+~~~~~~~~~~~~
+
+The Dataverse Software requires Java 11.
+
+We suggest downloading OpenJDK from https://adoptopenjdk.net
+
+On Linux, you are welcome to use the OpenJDK available from package managers.
+
+Install Netbeans or Maven
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+NetBeans IDE is recommended, and can be downloaded from http://netbeans.org . Developers may use any editor or IDE. We recommend NetBeans because it is free, works cross platform, has good support for Jakarta EE projects, and includes a required build tool, Maven.
+
+Below we describe how to build the Dataverse Software war file with Netbeans but if you prefer to use only Maven, you can find installation instructions in the :doc:`tools` section.
+
+Install Homebrew (Mac Only)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+On Mac, install Homebrew to simplify the steps below: https://brew.sh
+
+Clone the Dataverse Software Git Repo
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Fork https://github.com/IQSS/dataverse and then clone your fork like this:
+
+``git clone git@github.com:[YOUR GITHUB USERNAME]/dataverse.git``
+
+Build the Dataverse Software War File
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you installed Netbeans, follow these steps:
+
+- Launch Netbeans and click "File" and then "Open Project". Navigate to where you put the Dataverse Software code and double-click "Dataverse" to open the project.
+- If you see "resolve project problems," go ahead and let Netbeans try to resolve them. This will probably include downloading dependencies, which can take a while.
+- Allow Netbeans to install nb-javac (required for Java 8 and below).
+- Select "Dataverse" under Projects and click "Run" in the menu and then "Build Project (Dataverse)". Check back for "BUILD SUCCESS" at the end.
+
+If you installed Maven instead of Netbeans, run ``mvn package``. Check for "BUILD SUCCESS" at the end.
+
+NOTE: Do you use a locale different than ``en_US.UTF-8`` on your development machine? Are you in a different timezone
+than Harvard (Eastern Time)? You might experience issues while running tests that were written with these settings
+in mind. The Maven ``pom.xml`` tries to handle this for you by setting the locale to ``en_US.UTF-8`` and timezone
+``UTC``, but other, not-yet-discovered build or test problems might lurk in the shadows.
+
+Install jq
+~~~~~~~~~~
+
+On Mac, run this command:
+
+``brew install jq``
+
+On Linux, install ``jq`` from your package manager or download a binary from http://stedolan.github.io/jq/
+
+Install Payara
+~~~~~~~~~~~~~~
+
+Payara 6.2023.7 or higher is required.
+
+To install Payara, run the following commands:
+
+``cd /usr/local``
+
+``sudo curl -O -L https://nexus.payara.fish/repository/payara-community/fish/payara/distributions/payara/6.2023.7/payara-6.2023.7.zip``
+
+``sudo unzip payara-6.2023.7.zip``
+
+``sudo chown -R $USER /usr/local/payara6``
+
+If nexus.payara.fish is ever down for maintenance, Payara distributions are also available from https://repo1.maven.org/maven2/fish/payara/distributions/payara/
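+
+As an optional sanity check that the install is usable, you can ask Payara for its version:
+
+``/usr/local/payara6/bin/asadmin version``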
+
+Install Service Dependencies Directly on localhost
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Install PostgreSQL
+^^^^^^^^^^^^^^^^^^
+
+The Dataverse Software has been tested with PostgreSQL versions up to 13. PostgreSQL version 10+ is required.
+
+On Mac, go to https://www.postgresql.org/download/macosx/ and choose "Interactive installer by EDB" option. Note that version 13.5 is used in the command line examples below, but the process should be similar for other versions. When prompted to set a password for the "database superuser (postgres)" just enter "password".
+
+After installation is complete, make a backup of the ``pg_hba.conf`` file like this:
+
+``sudo cp /Library/PostgreSQL/13/data/pg_hba.conf /Library/PostgreSQL/13/data/pg_hba.conf.orig``
+
+Then edit ``pg_hba.conf`` with an editor such as vi:
+
+``sudo vi /Library/PostgreSQL/13/data/pg_hba.conf``
+
+In the "METHOD" column, change all instances of "scram-sha-256" (or whatever is in that column) to "trust". This will make it so PostgreSQL doesn't require a password.
+
+In the Finder, click "Applications" then "PostgreSQL 13" and launch the "Reload Configuration" app. Click "OK" after you see "server signaled".
+
+Next, to confirm the edit worked, launch the "pgAdmin" application from the same folder. Under "Browser", expand "Servers" and double click "PostgreSQL 13". When you are prompted for a password, leave it blank and click "OK". If you have successfully edited "pg_hba.conf", you can get in without a password.
+
+On Linux, you should just install PostgreSQL using your favorite package manager, such as ``yum``. (Consult the PostgreSQL section of :doc:`/installation/prerequisites` in the main Installation guide for more info and command line examples). Find ``pg_hba.conf`` and set the authentication method to "trust" and restart PostgreSQL.
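+
+On either platform, if you prefer the command line to a text editor, the METHOD change can be scripted with ``sed``. A sketch using the Mac path from above (adjust the path for your system, and double-check the result before reloading):
+
+``sudo sed -i.bak 's/scram-sha-256/trust/g' /Library/PostgreSQL/13/data/pg_hba.conf``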
+
+Install Solr
+^^^^^^^^^^^^
+
+`Solr `_ 8.11.1 is required.
+
+To install Solr, execute the following commands:
+
+``sudo mkdir /usr/local/solr``
+
+``sudo chown $USER /usr/local/solr``
+
+``cd /usr/local/solr``
+
+``curl -O http://archive.apache.org/dist/lucene/solr/8.11.1/solr-8.11.1.tgz``
+
+``tar xvfz solr-8.11.1.tgz``
+
+``cd solr-8.11.1/server/solr``
+
+``cp -r configsets/_default collection1``
+
+``curl -O https://raw.githubusercontent.com/IQSS/dataverse/develop/conf/solr/8.11.1/schema.xml``
+
+``curl -O https://raw.githubusercontent.com/IQSS/dataverse/develop/conf/solr/8.11.1/schema_dv_mdb_fields.xml``
+
+``mv schema*.xml collection1/conf``
+
+``curl -O https://raw.githubusercontent.com/IQSS/dataverse/develop/conf/solr/8.11.1/solrconfig.xml``
+
+``mv solrconfig.xml collection1/conf/solrconfig.xml``
+
+``cd /usr/local/solr/solr-8.11.1``
+
+(Please note that the extra jetty argument below is a security measure to limit connections to Solr to only your computer. For extra security, run a firewall.)
+
+``bin/solr start -j "-Djetty.host=127.0.0.1"``
+
+``bin/solr create_core -c collection1 -d server/solr/collection1/conf``
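+
+As an optional check that the core is up, Solr's ping handler should respond with status "OK":
+
+``curl "http://localhost:8983/solr/collection1/admin/ping"``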
+
+Install Service Dependencies Using Docker Compose
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+To avoid having to install service dependencies like PostgreSQL or Solr directly on your localhost, you can instead use the ``docker-compose-dev.yml`` file available in the repository root. For this option you need to have Docker and Docker Compose installed on your machine.
+
+The ``docker-compose-dev.yml`` can be configured to run only the service dependencies necessary to support a Dataverse installation running directly on localhost. In addition to PostgreSQL and Solr, it also runs an SMTP server.
+
+Before running the Docker Compose file, you need to update the value of the ``DATAVERSE_DB_USER`` environment variable to ``postgres``. The variable can be found inside the ``.env`` file in the repository root. This step is required as the Dataverse installation script expects that database user.
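+
+For example, after the edit the relevant line in ``.env`` should look like this (leave the other variables as they are):
+
+``DATAVERSE_DB_USER=postgres``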
+
+To run the Docker Compose file, go to the Dataverse repository root, then run:
+
+``docker-compose -f docker-compose-dev.yml up -d --scale dev_dataverse=0``
+
+Note that this command omits the Dataverse container defined in the Docker Compose file, since Dataverse is going to be installed directly on localhost in the next section.
+
+The command runs the containers in detached mode, but if you want to run them attached and thus view container logs in real time, remove the ``-d`` option from the command.
+
+Data volumes of each dependency will be persisted inside the ``docker-dev-volumes`` folder, inside the repository root.
+
+If you want to stop the containers, then run (for detached mode only, otherwise use ``Ctrl + C``):
+
+``docker-compose -f docker-compose-dev.yml stop``
+
+If you want to remove the containers, then run:
+
+``docker-compose -f docker-compose-dev.yml down``
+
+If you want to run a single container (the mail server, for example) then run:
+
+``docker-compose -f docker-compose-dev.yml up dev_smtp``
+
+For a fresh installation, and before running the Software Installer Script, it is recommended to delete the ``docker-dev-volumes`` folder to avoid installation problems due to existing data in the containers.
+
+Run the Dataverse Software Installer Script
+-------------------------------------------
+
+Navigate to the directory where you cloned the Dataverse Software git repo, then change into the ``scripts/installer`` directory like this:
+
+``cd scripts/installer``
+
+Create a Python virtual environment, activate it, then install dependencies:
+
+``python3 -m venv venv``
+
+``source venv/bin/activate``
+
+``pip install psycopg2-binary``
+
+The installer will try to connect to the SMTP server you tell it to use. If you haven't used the Docker Compose option for setting up the dependencies, or you don't have a mail server handy, you can run ``nc -l 25`` in another terminal and choose "localhost" (the default) to get past this check.
+
+Finally, run the installer (see also :download:`README_python.txt <../../../../scripts/installer/README_python.txt>` if necessary):
+
+``python3 install.py``
+
+Verify the Dataverse Software is Running
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+After the script has finished, you should be able to log into your Dataverse installation with the following credentials:
+
+- http://localhost:8080
+- username: dataverseAdmin
+- password: admin
+
+Configure Your Development Environment for Publishing
+-----------------------------------------------------
+
+Run the following command:
+
+``curl http://localhost:8080/api/admin/settings/:DoiProvider -X PUT -d FAKE``
+
+This will disable DOI registration by using a fake (in-code) DOI provider. Please note that this feature is only available in Dataverse Software 4.10+ and that at present, the UI will give no indication that the DOIs thus minted are fake.
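+
+If you would like to confirm the setting took effect, reading it back should be harmless (the admin settings API also answers GET requests; ``curl http://localhost:8080/api/admin/settings`` lists all settings if the single-setting form doesn't work for you):
+
+``curl http://localhost:8080/api/admin/settings/:DoiProvider``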
+
+Developers may also wish to consider using :ref:`PermaLinks `
+
+Configure Your Development Environment for GUI Edits
+----------------------------------------------------
+
+Out of the box, a JSF setting is configured for production use and prevents edits to the GUI (xhtml files) from being visible unless you do a full deployment.
+
+It is recommended that you run the following command so that simply saving the xhtml file in Netbeans is enough for the change to show up.
+
+``asadmin create-system-properties "dataverse.jsf.refresh-period=1"``
+
+For more on JSF settings like this, see :ref:`jsf-config`.
+
+Next Steps
+----------
+
+If you can log in to the Dataverse installation, great! If not, please see the :doc:`troubleshooting` section. For further assistance, please see "Getting Help" in the :doc:`intro` section.
+
+You're almost ready to start hacking on code. Now that the installer script has you up and running, you need to continue on to the :doc:`tips` section to get set up to deploy code from your IDE or the command line.
+
+----
+
+Previous: :doc:`intro` | Next: :doc:`tips`
diff --git a/doc/sphinx-guides/source/developers/containers.rst b/doc/sphinx-guides/source/developers/containers.rst
index b42f7f5a2e2..175b178b455 100755
--- a/doc/sphinx-guides/source/developers/containers.rst
+++ b/doc/sphinx-guides/source/developers/containers.rst
@@ -25,11 +25,6 @@ The primary community-lead projects (which the core team is drawing inspiration
- https://github.com/IQSS/dataverse-docker
- https://github.com/IQSS/dataverse-kubernetes (especially the https://github.com/EOSC-synergy/dataverse-kubernetes fork)
-Deprecated Projects
--------------------
-
-The :doc:`testing` section mentions using docker-aio for integration tests. We do not plan to keep this project alive.
-
Using Containers for Reproducible Research
------------------------------------------
diff --git a/doc/sphinx-guides/source/developers/debugging.rst b/doc/sphinx-guides/source/developers/debugging.rst
index 2088afe5521..50e8901b1ff 100644
--- a/doc/sphinx-guides/source/developers/debugging.rst
+++ b/doc/sphinx-guides/source/developers/debugging.rst
@@ -20,8 +20,8 @@ during development without recompiling. Changing the options will require at lea
how you get these options in. (Variable substitution only happens during deployment and when using system properties
or environment variables, you'll need to pass these into the domain, which usually will require an app server restart.)
-Please note that since Payara 5.2021.1 supporting MicroProfile Config 2.0, you can
-`use profiles `_
+Please note that you can use
+`MicroProfile Config `_
to maintain your settings more easily for different environments.
.. list-table::
diff --git a/doc/sphinx-guides/source/developers/dev-environment.rst b/doc/sphinx-guides/source/developers/dev-environment.rst
index b3f7fb1c1af..a4e79c3bb75 100755
--- a/doc/sphinx-guides/source/developers/dev-environment.rst
+++ b/doc/sphinx-guides/source/developers/dev-environment.rst
@@ -2,263 +2,81 @@
Development Environment
=======================
-These instructions are purposefully opinionated and terse to help you get your development environment up and running as quickly as possible! Please note that familiarity with running commands from the terminal is assumed.
+These instructions are oriented around Docker but the "classic" instructions we used for Dataverse 4 and 5 are still available at :doc:`classic-dev-env`.
.. contents:: |toctitle|
:local:
-Quick Start (Docker)
---------------------
+.. _container-dev-quickstart:
-The quickest way to get Dataverse running is in Docker as explained in :doc:`../container/dev-usage` section of the Container Guide.
-
-
-Classic Dev Environment
------------------------
-
-Since before Docker existed, we have encouraged installing Dataverse and all its dependencies directly on your development machine, as described below. This can be thought of as the "classic" development environment for Dataverse.
+Quickstart
+----------
-However, in 2023 we decided that we'd like to encourage all developers to start using Docker instead and opened https://github.com/IQSS/dataverse/issues/9616 to indicate that we plan to rewrite this page to recommend the use of Docker.
+First, install Java 11, Maven, and Docker.
-There's nothing wrong with the classic instructions below and we don't plan to simply delete them. They are a valid alternative to running Dataverse in Docker. We will likely move them to another page.
+After cloning the `dataverse repo `_, run this:
-Set Up Dependencies
--------------------
+``mvn -Pct clean package docker:run``
-Supported Operating Systems
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
+After some time you should be able to log in:
-Mac OS X or Linux is required because the setup scripts assume the presence of standard Unix utilities.
+- url: http://localhost:8080
+- username: dataverseAdmin
+- password: admin1
-Windows is gaining support through Docker as described in the :doc:`windows` section.
+Detailed Steps
+--------------
Install Java
~~~~~~~~~~~~
The Dataverse Software requires Java 11.
-We suggest downloading OpenJDK from https://adoptopenjdk.net
+On Mac and Windows, we suggest downloading OpenJDK from https://adoptium.net (formerly `AdoptOpenJDK `_) or `SDKMAN `_.
On Linux, you are welcome to use the OpenJDK available from package managers.
-Install Netbeans or Maven
-~~~~~~~~~~~~~~~~~~~~~~~~~
+Install Maven
+~~~~~~~~~~~~~
-NetBeans IDE is recommended, and can be downloaded from http://netbeans.org . Developers may use any editor or IDE. We recommend NetBeans because it is free, works cross platform, has good support for Jakarta EE projects, and includes a required build tool, Maven.
+Follow instructions at https://maven.apache.org
-Below we describe how to build the Dataverse Software war file with Netbeans but if you prefer to use only Maven, you can find installation instructions in the :doc:`tools` section.
+Install and Start Docker
+~~~~~~~~~~~~~~~~~~~~~~~~
-Install Homebrew (Mac Only)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Follow instructions at https://www.docker.com
-On Mac, install Homebrew to simplify the steps below: https://brew.sh
+Be sure to start Docker.
-Clone the Dataverse Software Git Repo
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Git Clone Repo
+~~~~~~~~~~~~~~
Fork https://github.com/IQSS/dataverse and then clone your fork like this:
``git clone git@github.com:[YOUR GITHUB USERNAME]/dataverse.git``
-Build the Dataverse Software War File
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If you installed Netbeans, follow these steps:
-
-- Launch Netbeans and click "File" and then "Open Project". Navigate to where you put the Dataverse Software code and double-click "Dataverse" to open the project.
-- If you see "resolve project problems," go ahead and let Netbeans try to resolve them. This will probably including downloading dependencies, which can take a while.
-- Allow Netbeans to install nb-javac (required for Java 8 and below).
-- Select "Dataverse" under Projects and click "Run" in the menu and then "Build Project (Dataverse)". Check back for "BUILD SUCCESS" at the end.
-
-If you installed Maven instead of Netbeans, run ``mvn package``. Check for "BUILD SUCCESS" at the end.
-
-NOTE: Do you use a locale different than ``en_US.UTF-8`` on your development machine? Are you in a different timezone
-than Harvard (Eastern Time)? You might experience issues while running tests that were written with these settings
-in mind. The Maven ``pom.xml`` tries to handle this for you by setting the locale to ``en_US.UTF-8`` and timezone
-``UTC``, but more, not yet discovered building or testing problems might lurk in the shadows.
-
-Install jq
-~~~~~~~~~~
-
-On Mac, run this command:
-
-``brew install jq``
-
-On Linux, install ``jq`` from your package manager or download a binary from http://stedolan.github.io/jq/
-
-Install Payara
-~~~~~~~~~~~~~~
-
-Payara 5.2022.3 or higher is required.
-
-To install Payara, run the following commands:
-
-``cd /usr/local``
-
-``sudo curl -O -L https://nexus.payara.fish/repository/payara-community/fish/payara/distributions/payara/5.2022.3/payara-5.2022.3.zip``
-
-``sudo unzip payara-5.2022.3.zip``
-
-``sudo chown -R $USER /usr/local/payara5``
-
-If nexus.payara.fish is ever down for maintenance, Payara distributions are also available from https://repo1.maven.org/maven2/fish/payara/distributions/payara/
-
-Install Service Dependencies Directly on localhost
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Install PostgreSQL
-^^^^^^^^^^^^^^^^^^
-
-The Dataverse Software has been tested with PostgreSQL versions up to 13. PostgreSQL version 10+ is required.
-
-On Mac, go to https://www.postgresql.org/download/macosx/ and choose "Interactive installer by EDB" option. Note that version 13.5 is used in the command line examples below, but the process should be similar for other versions. When prompted to set a password for the "database superuser (postgres)" just enter "password".
-
-After installation is complete, make a backup of the ``pg_hba.conf`` file like this:
-
-``sudo cp /Library/PostgreSQL/13/data/pg_hba.conf /Library/PostgreSQL/13/data/pg_hba.conf.orig``
-
-Then edit ``pg_hba.conf`` with an editor such as vi:
-
-``sudo vi /Library/PostgreSQL/13/data/pg_hba.conf``
-
-In the "METHOD" column, change all instances of "scram-sha-256" (or whatever is in that column) to "trust". This will make it so PostgreSQL doesn't require a password.
-
-In the Finder, click "Applications" then "PostgreSQL 13" and launch the "Reload Configuration" app. Click "OK" after you see "server signaled".
-
-Next, to confirm the edit worked, launch the "pgAdmin" application from the same folder. Under "Browser", expand "Servers" and double click "PostgreSQL 13". When you are prompted for a password, leave it blank and click "OK". If you have successfully edited "pg_hba.conf", you can get in without a password.
-
-On Linux, you should just install PostgreSQL using your favorite package manager, such as ``yum``. (Consult the PostgreSQL section of :doc:`/installation/prerequisites` in the main Installation guide for more info and command line examples). Find ``pg_hba.conf`` and set the authentication method to "trust" and restart PostgreSQL.
-
-Install Solr
-^^^^^^^^^^^^
-
-`Solr `_ 8.11.1 is required.
-
-To install Solr, execute the following commands:
-
-``sudo mkdir /usr/local/solr``
-
-``sudo chown $USER /usr/local/solr``
-
-``cd /usr/local/solr``
-
-``curl -O http://archive.apache.org/dist/lucene/solr/8.11.1/solr-8.11.1.tgz``
-
-``tar xvfz solr-8.11.1.tgz``
-
-``cd solr-8.11.1/server/solr``
-
-``cp -r configsets/_default collection1``
+Build and Run
+~~~~~~~~~~~~~
-``curl -O https://raw.githubusercontent.com/IQSS/dataverse/develop/conf/solr/8.11.1/schema.xml``
+Change into the ``dataverse`` directory you just cloned and run the following command:
-``curl -O https://raw.githubusercontent.com/IQSS/dataverse/develop/conf/solr/8.11.1/schema_dv_mdb_fields.xml``
+``mvn -Pct clean package docker:run``
-``mv schema*.xml collection1/conf``
+Verify
+~~~~~~
-``curl -O https://raw.githubusercontent.com/IQSS/dataverse/develop/conf/solr/8.11.1/solrconfig.xml``
+After some time you should be able to log in:
-``mv solrconfig.xml collection1/conf/solrconfig.xml``
-
-``cd /usr/local/solr/solr-8.11.1``
-
-(Please note that the extra jetty argument below is a security measure to limit connections to Solr to only your computer. For extra security, run a firewall.)
-
-``bin/solr start -j "-Djetty.host=127.0.0.1"``
-
-``bin/solr create_core -c collection1 -d server/solr/collection1/conf``
-
-Install Service Dependencies Using Docker Compose
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-To avoid having to install service dependencies like PostgreSQL or Solr directly on your localhost, there is the alternative of using the ``docker-compose-dev.yml`` file available in the repository root. For this option you need to have Docker and Docker Compose installed on your machine.
-
-The ``docker-compose-dev.yml`` can be configured to only run the service dependencies necessary to support a Dataverse installation running directly on localhost. In addition to PostgreSQL and Solr, it also runs a SMTP server.
-
-Before running the Docker Compose file, you need to update the value of the ``DATAVERSE_DB_USER`` environment variable to ``postgres``. The variable can be found inside the ``.env`` file in the repository root. This step is required as the Dataverse installation script expects that database user.
-
-To run the Docker Compose file, go to the Dataverse repository root, then run:
-
-``docker-compose -f docker-compose-dev.yml up -d --scale dev_dataverse=0``
-
-Note that this command omits the Dataverse container defined in the Docker Compose file, since Dataverse is going to be installed directly on localhost in the next section.
-
-The command runs the containers in detached mode, but if you want to run them attached and thus view container logs in real time, remove the ``-d`` option from the command.
-
-Data volumes of each dependency will be persisted inside the ``docker-dev-volumes`` folder, inside the repository root.
-
-If you want to stop the containers, then run (for detached mode only, otherwise use ``Ctrl + C``):
-
-``docker-compose -f docker-compose-dev.yml stop``
-
-If you want to remove the containers, then run:
-
-``docker-compose -f docker-compose-dev.yml down``
-
-If you want to run a single container (the mail server, for example) then run:
-
-``docker-compose -f docker-compose-dev.yml up dev_smtp``
-
-For a fresh installation, and before running the Software Installer Script, it is recommended to delete the docker-dev-env folder to avoid installation problems due to existing data in the containers.
-
-Run the Dataverse Software Installer Script
--------------------------------------------
-
-Navigate to the directory where you cloned the Dataverse Software git repo change directories to the ``scripts/installer`` directory like this:
-
-``cd scripts/installer``
-
-Create a Python virtual environment, activate it, then install dependencies:
-
-``python3 -m venv venv``
-
-``source venv/bin/activate``
-
-``pip install psycopg2-binary``
-
-The installer will try to connect to the SMTP server you tell it to use. If you haven't used the Docker Compose option for setting up the dependencies, or you don't have a mail server handy, you can run ``nc -l 25`` in another terminal and choose "localhost" (the default) to get past this check.
-
-Finally, run the installer (see also :download:`README_python.txt <../../../../scripts/installer/README_python.txt>` if necessary):
-
-``python3 install.py``
-
-Verify the Dataverse Software is Running
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-After the script has finished, you should be able to log into your Dataverse installation with the following credentials:
-
-- http://localhost:8080
+- url: http://localhost:8080
- username: dataverseAdmin
-- password: admin
-
-Configure Your Development Environment for Publishing
------------------------------------------------------
-
-Run the following command:
-
-``curl http://localhost:8080/api/admin/settings/:DoiProvider -X PUT -d FAKE``
-
-This will disable DOI registration by using a fake (in-code) DOI provider. Please note that this feature is only available in Dataverse Software 4.10+ and that at present, the UI will give no indication that the DOIs thus minted are fake.
-
-Developers may also wish to consider using :ref:`PermaLinks `
-
-Configure Your Development Environment for GUI Edits
-----------------------------------------------------
-
-Out of the box, a JSF setting is configured for production use and prevents edits to the GUI (xhtml files) from being visible unless you do a full deployment.
-
-It is recommended that you run the following command so that simply saving the xhtml file in Netbeans is enough for the change to show up.
-
-``asadmin create-system-properties "dataverse.jsf.refresh-period=1"``
-
-For more on JSF settings like this, see :ref:`jsf-config`.
-
-Next Steps
-----------
+- password: admin1
-If you can log in to the Dataverse installation, great! If not, please see the :doc:`troubleshooting` section. For further assistance, please see "Getting Help" in the :doc:`intro` section.
+More Information
+----------------
-You're almost ready to start hacking on code. Now that the installer script has you up and running, you need to continue on to the :doc:`tips` section to get set up to deploy code from your IDE or the command line.
+See also the :doc:`/container/dev-usage` section of the Container Guide.
-----
+Getting Help
+------------
-Previous: :doc:`intro` | Next: :doc:`tips`
+Please feel free to reach out at https://chat.dataverse.org or https://groups.google.com/g/dataverse-dev if you have any difficulty setting up a dev environment!
diff --git a/doc/sphinx-guides/source/developers/index.rst b/doc/sphinx-guides/source/developers/index.rst
index d70b682fcda..c77ddc13519 100755
--- a/doc/sphinx-guides/source/developers/index.rst
+++ b/doc/sphinx-guides/source/developers/index.rst
@@ -41,4 +41,5 @@ Developer Guide
dataset-migration-api
workflows
fontcustom
+ classic-dev-env
diff --git a/doc/sphinx-guides/source/developers/make-data-count.rst b/doc/sphinx-guides/source/developers/make-data-count.rst
index a3c0d10dc5e..ada0f13bb2f 100644
--- a/doc/sphinx-guides/source/developers/make-data-count.rst
+++ b/doc/sphinx-guides/source/developers/make-data-count.rst
@@ -38,7 +38,7 @@ Next you need to have the Dataverse installation add some entries to the log tha
Next you should run Counter Processor to convert the log into a SUSHI report, which is in JSON format. Before running Counter Processor, you need to put a configuration file into place. As a starting point use :download:`counter-processor-config.yaml <../../../../scripts/vagrant/counter-processor-config.yaml>` and edit the file, paying particular attention to the following settings:
-- ``log_name_pattern`` You might want something like ``/usr/local/payara5/glassfish/domains/domain1/logs/counter_(yyyy-mm-dd).log``
+- ``log_name_pattern`` You might want something like ``/usr/local/payara6/glassfish/domains/domain1/logs/counter_(yyyy-mm-dd).log``
- ``year_month`` You should probably set this to the current month.
- ``output_file`` This needs to be a directory that the "dataverse" Unix user can read but that the "counter" user can write to. In dev, you can probably get away with "/tmp" as the directory.
- ``platform`` Out of the box from Counter Processor this is set to ``Dash`` but this should be changed to match the name of your Dataverse installation. Examples are "Harvard Dataverse Repository" for Harvard University or "LibraData" for the University of Virginia.
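As a sketch of how the four settings called out above fit together, the values below are illustrative placeholders only; edit the real counter-processor-config.yaml you downloaded rather than generating a new one:

.. code-block:: bash

   # Illustrative values only -- adjust every path, month, and name for your installation.
   cat > /tmp/counter-processor-config.example.yaml <<'EOF'
   log_name_pattern: /usr/local/payara6/glassfish/domains/domain1/logs/counter_(yyyy-mm-dd).log
   year_month: 2023-08
   output_file: /tmp/make-data-count-report
   platform: LibraData
   EOF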
diff --git a/doc/sphinx-guides/source/developers/testing.rst b/doc/sphinx-guides/source/developers/testing.rst
index c228d8e20ca..f788719c9c6 100755
--- a/doc/sphinx-guides/source/developers/testing.rst
+++ b/doc/sphinx-guides/source/developers/testing.rst
@@ -173,12 +173,7 @@ Finally, run the script:
Running the full API test suite using Docker
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. note::
-   **Sunsetting of this module is imminent.** There is no schedule yet, but expect it to go away.
- Please let the `Dataverse Containerization Working Group `_ know if you are a user and
- what should be preserved.
-
-To run the full suite of integration tests on your laptop, we recommend using the "all in one" Docker configuration described in ``conf/docker-aio/readme.md`` in the root of the repo.
+To run the full suite of integration tests on your laptop, we recommend running Dataverse and its dependencies in Docker, as explained in the :doc:`/container/dev-usage` section of the Container Guide.
Alternatively, you can run tests against the app server running on your laptop by following the "getting set up" steps below.
@@ -308,9 +303,9 @@ To run these tests, simply call out to Maven:
Measuring Coverage of Integration Tests
---------------------------------------
-Measuring the code coverage of integration tests with Jacoco requires several steps. In order to make these steps clear we'll use "/usr/local/payara5" as the Payara directory and "dataverse" as the Payara Unix user.
+Measuring the code coverage of integration tests with Jacoco requires several steps. In order to make these steps clear we'll use "/usr/local/payara6" as the Payara directory and "dataverse" as the Payara Unix user.
-Please note that this was tested under Glassfish 4 but it is hoped that the same steps will work with Payara 5.
+Please note that this was tested under Glassfish 4 but it is hoped that the same steps will work with Payara.
Add jacocoagent.jar to Payara
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -329,9 +324,9 @@ Note that we are running the following commands as the user "dataverse". In shor
cd local/jacoco-0.8.1
wget https://github.com/jacoco/jacoco/releases/download/v0.8.1/jacoco-0.8.1.zip
unzip jacoco-0.8.1.zip
- /usr/local/payara5/bin/asadmin stop-domain
- cp /home/dataverse/local/jacoco-0.8.1/lib/jacocoagent.jar /usr/local/payara5/glassfish/lib
- /usr/local/payara5/bin/asadmin start-domain
+ /usr/local/payara6/bin/asadmin stop-domain
+ cp /home/dataverse/local/jacoco-0.8.1/lib/jacocoagent.jar /usr/local/payara6/glassfish/lib
+ /usr/local/payara6/bin/asadmin start-domain
Add jacococli.jar to the WAR File
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -354,21 +349,21 @@ Run this as the "dataverse" user.
.. code-block:: bash
- /usr/local/payara5/bin/asadmin deploy dataverse-jacoco.war
+ /usr/local/payara6/bin/asadmin deploy dataverse-jacoco.war
-Note that after deployment the file "/usr/local/payara5/glassfish/domains/domain1/config/jacoco.exec" exists and is empty.
+Note that after deployment the file "/usr/local/payara6/glassfish/domains/domain1/config/jacoco.exec" exists and is empty.
Run Integration Tests
~~~~~~~~~~~~~~~~~~~~~
Note that even though you see "docker-aio" in the command below, we assume you are not necessarily running the test suite within Docker. (Some day we'll probably move this script to another directory.) For this reason, we pass the URL with the normal port (8080) that app servers run on to the ``run-test-suite.sh`` script.
-Note that "/usr/local/payara5/glassfish/domains/domain1/config/jacoco.exec" will become non-empty after you stop and start Payara. You must stop and start Payara before every run of the integration test suite.
+Note that "/usr/local/payara6/glassfish/domains/domain1/config/jacoco.exec" will become non-empty after you stop and start Payara. You must stop and start Payara before every run of the integration test suite.
.. code-block:: bash
- /usr/local/payara5/bin/asadmin stop-domain
- /usr/local/payara5/bin/asadmin start-domain
+ /usr/local/payara6/bin/asadmin stop-domain
+ /usr/local/payara6/bin/asadmin start-domain
git clone https://github.com/IQSS/dataverse.git
cd dataverse
conf/docker-aio/run-test-suite.sh http://localhost:8080
@@ -383,7 +378,7 @@ Run these commands as the "dataverse" user. The ``cd dataverse`` means that you
.. code-block:: bash
cd dataverse
- java -jar /home/dataverse/local/jacoco-0.8.1/lib/jacococli.jar report --classfiles target/classes --sourcefiles src/main/java --html target/coverage-it/ /usr/local/payara5/glassfish/domains/domain1/config/jacoco.exec
+ java -jar /home/dataverse/local/jacoco-0.8.1/lib/jacococli.jar report --classfiles target/classes --sourcefiles src/main/java --html target/coverage-it/ /usr/local/payara6/glassfish/domains/domain1/config/jacoco.exec
Read Code Coverage Report
~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/sphinx-guides/source/developers/tips.rst b/doc/sphinx-guides/source/developers/tips.rst
index bf75a05f84e..e1ee40cafa5 100755
--- a/doc/sphinx-guides/source/developers/tips.rst
+++ b/doc/sphinx-guides/source/developers/tips.rst
@@ -19,20 +19,20 @@ Undeploy the war File from the Dataverse Software Installation Script
Because the initial deployment of the war file was done outside of Netbeans by the Dataverse Software installation script, it's a good idea to undeploy that war file to give Netbeans a clean slate to work with.
-Assuming you installed Payara in ``/usr/local/payara5``, run the following ``asadmin`` command to see the version of the Dataverse Software that the Dataverse Software installation script deployed:
+Assuming you installed Payara in ``/usr/local/payara6``, run the following ``asadmin`` command to see the version of the Dataverse Software that the Dataverse Software installation script deployed:
-``/usr/local/payara5/bin/asadmin list-applications``
+``/usr/local/payara6/bin/asadmin list-applications``
You will probably see something like ``dataverse-5.0 <ejb, web>`` as the output. To undeploy, use whichever version you see like this:
-``/usr/local/payara5/bin/asadmin undeploy dataverse-5.0``
+``/usr/local/payara6/bin/asadmin undeploy dataverse-5.0``
Now that Payara doesn't have anything deployed, we can proceed with getting Netbeans set up to deploy the code.
Add Payara as a Server in Netbeans
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Launch Netbeans and click "Tools" and then "Servers". Click "Add Server" and select "Payara Server" and set the installation location to ``/usr/local/payara5``. The defaults are fine so you can click "Next" and "Finish".
+Launch Netbeans and click "Tools" and then "Servers". Click "Add Server" and select "Payara Server" and set the installation location to ``/usr/local/payara6``. The defaults are fine so you can click "Next" and "Finish".
Please note that if you are on a Mac, Netbeans may be unable to start Payara due to proxy settings in Netbeans. Go to the "General" tab in Netbeans preferences and click "Test connection" to see if you are affected. If you get a green checkmark, you're all set. If you get a red exclamation mark, change "Proxy Settings" to "No Proxy" and retest. A more complicated answer having to do with changing network settings is available at https://discussions.apple.com/thread/7680039?answerId=30715103022#30715103022 and the bug is also described at https://netbeans.org/bugzilla/show_bug.cgi?id=268076
@@ -117,7 +117,7 @@ Deploying With ``asadmin``
Sometimes you want to deploy code without using Netbeans or from the command line on a server you have ssh'ed into.
-For the ``asadmin`` commands below, we assume you have already changed directories to ``/usr/local/payara5/glassfish/bin`` or wherever you have installed Payara.
+For the ``asadmin`` commands below, we assume you have already changed directories to ``/usr/local/payara6/glassfish/bin`` or wherever you have installed Payara.
There are four steps to this process:
diff --git a/doc/sphinx-guides/source/developers/tools.rst b/doc/sphinx-guides/source/developers/tools.rst
index cbd27d6e8d2..238db7ce7b0 100755
--- a/doc/sphinx-guides/source/developers/tools.rst
+++ b/doc/sphinx-guides/source/developers/tools.rst
@@ -38,7 +38,7 @@ From the root of the git repo (where the ``Vagrantfile`` is), run ``vagrant up``
Please note that running ``vagrant up`` for the first time should run the ``downloads/download.sh`` script for you to download required software such as an app server, Solr, etc. However, these dependencies change over time so it's a place to look if ``vagrant up`` was working but later fails.
-On Windows if you see an error like ``/usr/bin/perl^M: bad interpreter`` you might need to run ``dos2unix`` on the installation scripts.
+On Windows if you see an error like ``/usr/bin/python^M: bad interpreter`` you might need to run ``dos2unix`` on the installation scripts.
PlantUML
++++++++
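If you do hit the ``bad interpreter`` error mentioned above, converting the offending script's line endings in place is usually enough; a sketch (file path is an example):

.. code-block:: bash

   # CRLF -> LF conversion; requires the dos2unix package
   dos2unix scripts/installer/install.py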
diff --git a/doc/sphinx-guides/source/installation/advanced.rst b/doc/sphinx-guides/source/installation/advanced.rst
index a842d566595..87f2a4fd0ab 100644
--- a/doc/sphinx-guides/source/installation/advanced.rst
+++ b/doc/sphinx-guides/source/installation/advanced.rst
@@ -13,8 +13,8 @@ Multiple App Servers
You should be conscious of the following when running multiple app servers.
- Only one app server can be the dedicated timer server, as explained in the :doc:`/admin/timers` section of the Admin Guide.
-- When users upload a logo or footer for their Dataverse collection using the "theme" feature described in the :doc:`/user/dataverse-management` section of the User Guide, these logos are stored only on the app server the user happened to be on when uploading the logo. By default these logos and footers are written to the directory ``/usr/local/payara5/glassfish/domains/domain1/docroot/logos``.
-- When a sitemap is created by an app server it is written to the filesystem of just that app server. By default the sitemap is written to the directory ``/usr/local/payara5/glassfish/domains/domain1/docroot/sitemap``.
+- When users upload a logo or footer for their Dataverse collection using the "theme" feature described in the :doc:`/user/dataverse-management` section of the User Guide, these logos are stored only on the app server the user happened to be on when uploading the logo. By default these logos and footers are written to the directory ``/usr/local/payara6/glassfish/domains/domain1/docroot/logos``.
+- When a sitemap is created by an app server it is written to the filesystem of just that app server. By default the sitemap is written to the directory ``/usr/local/payara6/glassfish/domains/domain1/docroot/sitemap``.
- If Make Data Count is used, its raw logs must be copied from each app server to single instance of Counter Processor. See also :ref:`:MDCLogPath` section in the Configuration section of this guide and the :doc:`/admin/make-data-count` section of the Admin Guide.
- Dataset draft version logging occurs separately on each app server. See :ref:`edit-draft-versions-logging` section in Monitoring of the Admin Guide for details.
- Password aliases (``dataverse.db.password``, etc.) are stored per app server.
diff --git a/doc/sphinx-guides/source/installation/config.rst b/doc/sphinx-guides/source/installation/config.rst
index 8493702406b..f9fe74afc7c 100644
--- a/doc/sphinx-guides/source/installation/config.rst
+++ b/doc/sphinx-guides/source/installation/config.rst
@@ -501,7 +501,7 @@ Logging & Slow Performance
File Storage: Using a Local Filesystem and/or Swift and/or Object Stores and/or Trusted Remote Stores
-----------------------------------------------------------------------------------------------------
-By default, a Dataverse installation stores all data files (files uploaded by end users) on the filesystem at ``/usr/local/payara5/glassfish/domains/domain1/files``. This path can vary based on answers you gave to the installer (see the :ref:`dataverse-installer` section of the Installation Guide) or afterward by reconfiguring the ``dataverse.files.\<id\>.directory`` JVM option described below.
+By default, a Dataverse installation stores all data files (files uploaded by end users) on the filesystem at ``/usr/local/payara6/glassfish/domains/domain1/files``. This path can vary based on answers you gave to the installer (see the :ref:`dataverse-installer` section of the Installation Guide) or afterward by reconfiguring the ``dataverse.files.\<id\>.directory`` JVM option described below.
A Dataverse installation can alternately store files in a Swift or S3-compatible object store, and can now be configured to support multiple stores at once. With a multi-store configuration, the location for new files can be controlled on a per-Dataverse collection basis.
@@ -975,7 +975,7 @@ All of these processes are triggered after finishing transfers over the wire and
Before being moved there,
- JSF Web UI uploads are stored at :ref:`${dataverse.files.uploads} <dataverse.files.uploads>`, defaulting to
- ``/usr/local/payara5/glassfish/domains/domain1/uploads`` folder in a standard installation. This place is
+ ``/usr/local/payara6/glassfish/domains/domain1/uploads`` folder in a standard installation. This place is
configurable and might be set to a separate disk volume where stale uploads are purged periodically.
- API uploads are stored at the system's temporary files location indicated by the Java system property
``java.io.tmpdir``, defaulting to ``/tmp`` on Linux. If this location is backed by a `tmpfs `_
@@ -1053,7 +1053,7 @@ Custom Navbar Logo
The Dataverse Software allows you to replace the default Dataverse Project icon and name branding in the navbar with your own custom logo. Note that this logo is separate from the logo used in the theme of the root Dataverse collection (see :ref:`theme`).
-The custom logo image file is expected to be small enough to fit comfortably in the navbar, no more than 50 pixels in height and 160 pixels in width. Create a ``navbar`` directory in your Payara ``logos`` directory and place your custom logo there. By default, your logo image file will be located at ``/usr/local/payara5/glassfish/domains/domain1/docroot/logos/navbar/logo.png``.
+The custom logo image file is expected to be small enough to fit comfortably in the navbar, no more than 50 pixels in height and 160 pixels in width. Create a ``navbar`` directory in your Payara ``logos`` directory and place your custom logo there. By default, your logo image file will be located at ``/usr/local/payara6/glassfish/domains/domain1/docroot/logos/navbar/logo.png``.
Given this location for the custom logo image file, run this curl command to add it to your settings:
@@ -1518,7 +1518,7 @@ The Google Cloud Archiver also requires a key file that must be renamed to 'goog
For example:
-``cp <your key file> /usr/local/payara5/glassfish/domains/domain1/files/googlecloudkey.json``
+``cp <your key file> /usr/local/payara6/glassfish/domains/domain1/files/googlecloudkey.json``
.. _S3 Archiver Configuration:
@@ -1634,7 +1634,7 @@ You have a couple of options for putting an updated robots.txt file into product
For more of an explanation of ``ProxyPassMatch`` see the :doc:`shibboleth` section.
-If you are not fronting Payara with Apache you'll need to prevent Payara from serving the robots.txt file embedded in the war file by overwriting robots.txt after the war file has been deployed. The downside of this technique is that you will have to remember to overwrite robots.txt in the "exploded" war file each time you deploy the war file, which probably means each time you upgrade to a new version of the Dataverse Software. Furthermore, since the version of the Dataverse Software is always incrementing and the version can be part of the file path, you will need to be conscious of where on disk you need to replace the file. For example, for Dataverse Software 4.6.1 the path to robots.txt may be ``/usr/local/payara5/glassfish/domains/domain1/applications/dataverse-4.6.1/robots.txt`` with the version number ``4.6.1`` as part of the path.
+If you are not fronting Payara with Apache you'll need to prevent Payara from serving the robots.txt file embedded in the war file by overwriting robots.txt after the war file has been deployed. The downside of this technique is that you will have to remember to overwrite robots.txt in the "exploded" war file each time you deploy the war file, which probably means each time you upgrade to a new version of the Dataverse Software. Furthermore, since the version of the Dataverse Software is always incrementing and the version can be part of the file path, you will need to be conscious of where on disk you need to replace the file. For example, for Dataverse Software 4.6.1 the path to robots.txt may be ``/usr/local/payara6/glassfish/domains/domain1/applications/dataverse-4.6.1/robots.txt`` with the version number ``4.6.1`` as part of the path.
Creating a Sitemap and Submitting it to Search Engines
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -1647,7 +1647,7 @@ Create or update your sitemap by adding the following curl command to cron to ru
This will create or update a file in the following location unless you have customized your installation directory for Payara:
-``/usr/local/payara5/glassfish/domains/domain1/docroot/sitemap/sitemap.xml``
+``/usr/local/payara6/glassfish/domains/domain1/docroot/sitemap/sitemap.xml``
On a Dataverse installation with many datasets, the creation or updating of the sitemap can take a while. You can check Payara's server.log file for "BEGIN updateSiteMap" and "END updateSiteMap" lines to know when the process started and stopped and any errors in between.
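For reference, the sitemap is created or updated through the admin API, along the lines sketched below (endpoint as documented elsewhere in this guide; localhost assumes the command runs on the server itself):

.. code-block:: bash

   # Create or update the sitemap; suitable for a nightly cron entry
   curl -X POST http://localhost:8080/api/admin/sitemap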
@@ -1690,7 +1690,7 @@ When changing values these values with ``asadmin``, you'll need to delete the ol
``./asadmin create-jvm-options "-Ddataverse.fqdn=dataverse.example.com"``
-It's also possible to change these values by stopping Payara, editing ``payara5/glassfish/domains/domain1/config/domain.xml``, and restarting Payara.
+It's also possible to change these values by stopping Payara, editing ``payara6/glassfish/domains/domain1/config/domain.xml``, and restarting Payara.
.. _dataverse.fqdn:
@@ -1786,7 +1786,7 @@ Configure a folder to store the incoming file stream during uploads (before tran
Please also see :ref:`temporary-file-storage` for more details.
You can use an absolute path or a relative, which is relative to the application server domain directory.
-Defaults to ``./uploads``, which resolves to ``/usr/local/payara5/glassfish/domains/domain1/uploads`` in a default
+Defaults to ``./uploads``, which resolves to ``/usr/local/payara6/glassfish/domains/domain1/uploads`` in a default
installation.
Can also be set via *MicroProfile Config API* sources, e.g. the environment variable ``DATAVERSE_FILES_UPLOADS``.
@@ -3485,7 +3485,7 @@ Sets how long a cached metrics result is used before re-running the query for a
Sets the path where the raw Make Data Count logs are stored before being processed. If not set, no logs will be created for Make Data Count. See also the :doc:`/admin/make-data-count` section of the Admin Guide.
-``curl -X PUT -d '/usr/local/payara5/glassfish/domains/domain1/logs' http://localhost:8080/api/admin/settings/:MDCLogPath``
+``curl -X PUT -d '/usr/local/payara6/glassfish/domains/domain1/logs' http://localhost:8080/api/admin/settings/:MDCLogPath``
.. _:DisplayMDCMetrics:
diff --git a/doc/sphinx-guides/source/installation/installation-main.rst b/doc/sphinx-guides/source/installation/installation-main.rst
index 8559d6ce194..021a97415e3 100755
--- a/doc/sphinx-guides/source/installation/installation-main.rst
+++ b/doc/sphinx-guides/source/installation/installation-main.rst
@@ -28,8 +28,8 @@ Unpack the zip file - this will create the directory ``dvinstall``.
Just make sure the user running the installer has write permission to:
-- /usr/local/payara5/glassfish/lib
-- /usr/local/payara5/glassfish/domains/domain1
+- /usr/local/payara6/glassfish/lib
+- /usr/local/payara6/glassfish/domains/domain1
- the current working directory of the installer (it currently writes its logfile there), and
- your jvm-option specified files.dir
@@ -47,7 +47,7 @@ Follow the instructions in the text file.
The script will prompt you for some configuration values. If this is a test/evaluation installation, it may be possible to accept the default values provided for most of the settings:
- Internet Address of your host: localhost
-- Payara Directory: /usr/local/payara5
+- Payara Directory: /usr/local/payara6
- Payara User: current user running the installer script
- Administrator email address for this Dataverse installation: (none)
- SMTP (mail) server to relay notification messages: localhost
@@ -98,7 +98,7 @@ The supplied site URL will be saved under the JVM option :ref:`dataverse.siteUrl
**IMPORTANT:** Please note that "out of the box" the installer will configure the Dataverse installation to leave unrestricted access to the administration APIs from (and only from) localhost. Please consider the security implications of this arrangement (anyone with shell access to the server can potentially mess with your Dataverse installation). An alternative solution would be to block open access to these sensitive API endpoints completely, and to only allow requests supplying a pre-defined "unblock token" (password). If you prefer that as a solution, please consult the supplied script ``post-install-api-block.sh`` for examples on how to set it up. See also "Securing Your Installation" under the :doc:`config` section.
-The Dataverse Software uses JHOVE_ to help identify the file format (CSV, PNG, etc.) for files that users have uploaded. The installer places files called ``jhove.conf`` and ``jhoveConfig.xsd`` into the directory ``/usr/local/payara5/glassfish/domains/domain1/config`` by default and makes adjustments to the jhove.conf file based on the directory into which you chose to install Payara.
+The Dataverse Software uses JHOVE_ to help identify the file format (CSV, PNG, etc.) for files that users have uploaded. The installer places files called ``jhove.conf`` and ``jhoveConfig.xsd`` into the directory ``/usr/local/payara6/glassfish/domains/domain1/config`` by default and makes adjustments to the jhove.conf file based on the directory into which you chose to install Payara.
.. _JHOVE: http://jhove.openpreservation.org
@@ -249,7 +249,7 @@ Deleting Uploaded Files
The path below will depend on the value for ``dataverse.files.directory`` as described in the :doc:`config` section:
-``rm -rf /usr/local/payara5/glassfish/domains/domain1/files``
+``rm -rf /usr/local/payara6/glassfish/domains/domain1/files``
Rerun Installer
^^^^^^^^^^^^^^^
diff --git a/doc/sphinx-guides/source/installation/intro.rst b/doc/sphinx-guides/source/installation/intro.rst
index 2251af7b81b..67fc774bdbd 100644
--- a/doc/sphinx-guides/source/installation/intro.rst
+++ b/doc/sphinx-guides/source/installation/intro.rst
@@ -48,7 +48,7 @@ If you've encountered a problem installing Dataverse and are ready to ask for he
- Operating system (usually a Linux distribution) and version.
- Output from the installer (STDOUT, STDERR).
- The ``scripts/api/setup-all.*.log`` files left behind by the installer.
-- The ``server.log`` file from Payara (by default at ``/usr/local/payara5/glassfish/domains/domain1/logs/server.log``).
+- The ``server.log`` file from Payara (by default at ``/usr/local/payara6/glassfish/domains/domain1/logs/server.log``).
Improving this Guide
--------------------
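When gathering the items above, it is often handy to watch the Payara log while reproducing the failure (path per the default install above):

.. code-block:: bash

   # Follow the server log in real time
   tail -f /usr/local/payara6/glassfish/domains/domain1/logs/server.log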
diff --git a/doc/sphinx-guides/source/installation/prerequisites.rst b/doc/sphinx-guides/source/installation/prerequisites.rst
index d95aa78bb26..b86bee9cd31 100644
--- a/doc/sphinx-guides/source/installation/prerequisites.rst
+++ b/doc/sphinx-guides/source/installation/prerequisites.rst
@@ -44,7 +44,7 @@ On RHEL/derivative you can make Java 11 the default with the ``alternatives`` co
Payara
------
-Payara 5.2022.3 is recommended. Newer versions might work fine, regular updates are recommended.
+Payara 6.2023.7 is recommended. Newer versions might work fine. Regular updates are recommended.
Installing Payara
=================
@@ -53,11 +53,11 @@ Installing Payara
# useradd dataverse
-- Download and install Payara (installed in ``/usr/local/payara5`` in the example commands below)::
+- Download and install Payara (installed in ``/usr/local/payara6`` in the example commands below)::
- # wget https://nexus.payara.fish/repository/payara-community/fish/payara/distributions/payara/5.2022.3/payara-5.2022.3.zip
- # unzip payara-5.2022.3.zip
- # mv payara5 /usr/local
+ # wget https://nexus.payara.fish/repository/payara-community/fish/payara/distributions/payara/6.2023.7/payara-6.2023.7.zip
+ # unzip payara-6.2023.7.zip
+ # mv payara6 /usr/local
If nexus.payara.fish is ever down for maintenance, Payara distributions are also available from https://repo1.maven.org/maven2/fish/payara/distributions/payara/
@@ -65,15 +65,15 @@ If you intend to install and run Payara under a service account (and we hope you
- Set service account permissions::
- # chown -R root:root /usr/local/payara5
- # chown dataverse /usr/local/payara5/glassfish/lib
- # chown -R dataverse:dataverse /usr/local/payara5/glassfish/domains/domain1
+ # chown -R root:root /usr/local/payara6
+ # chown dataverse /usr/local/payara6/glassfish/lib
+ # chown -R dataverse:dataverse /usr/local/payara6/glassfish/domains/domain1
After installation, you may chown the lib/ directory back to root; the installer only needs write access to copy the JDBC driver into that directory.
- Change from ``-client`` to ``-server`` under ``<jvm-options>``::
- # vim /usr/local/payara5/glassfish/domains/domain1/config/domain.xml
+ # vim /usr/local/payara6/glassfish/domains/domain1/config/domain.xml
This recommendation comes from http://www.c2b2.co.uk/middleware-blog/glassfish-4-performance-tuning-monitoring-and-troubleshooting.php among other places.
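A quick sanity check after unpacking and chowning Payara (path per the example commands above):

.. code-block:: bash

   # Should report the Payara 6 version that was just installed
   /usr/local/payara6/bin/asadmin version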
diff --git a/downloads/.gitignore b/downloads/.gitignore
index 1b51bf4def7..c21ee37922f 100644
--- a/downloads/.gitignore
+++ b/downloads/.gitignore
@@ -1,4 +1,4 @@
-payara-5.201.zip
+payara-6.2023.6.zip
solr-7.3.0.tgz
weld-osgi-bundle-2.2.10.Final-glassfish4.jar
schemaSpy_5.0.0.jar
diff --git a/downloads/download.sh b/downloads/download.sh
index 7b9de0397cb..7ccff358820 100755
--- a/downloads/download.sh
+++ b/downloads/download.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-curl -L -O https://s3-eu-west-1.amazonaws.com/payara.fish/Payara+Downloads/5.2022.3/payara-5.2022.3.zip
+curl -L -O https://nexus.payara.fish/repository/payara-community/fish/payara/distributions/payara/6.2023.7/payara-6.2023.7.zip
curl -L -O https://archive.apache.org/dist/lucene/solr/8.11.1/solr-8.11.1.tgz
curl -L -O https://search.maven.org/remotecontent?filepath=org/jboss/weld/weld-osgi-bundle/2.2.10.Final/weld-osgi-bundle-2.2.10.Final-glassfish4.jar
curl -s -L http://sourceforge.net/projects/schemaspy/files/schemaspy/SchemaSpy%205.0.0/schemaSpy_5.0.0.jar/download > schemaSpy_5.0.0.jar
diff --git a/modules/container-base/src/main/docker/Dockerfile b/modules/container-base/src/main/docker/Dockerfile
index bbd02a14328..4e11e7835e6 100644
--- a/modules/container-base/src/main/docker/Dockerfile
+++ b/modules/container-base/src/main/docker/Dockerfile
@@ -75,6 +75,8 @@ WORKDIR /
SHELL ["/bin/bash", "-euo", "pipefail", "-c"]
# Mark these directories as mutable data containers to avoid cluttering the image's overlayfs at runtime.
VOLUME ${STORAGE_DIR} ${SECRETS_DIR} ${DUMPS_DIR}
+# Workaround for https://github.com/payara/Payara/issues/6344
+ENV JAVA_TOOL_OPTIONS="-Djdk.util.zip.disableZip64ExtraFieldValidation=true --add-opens=java.base/java.io=ALL-UNNAMED"
 RUN <<EOF
-    sed -i 's#<servlet-class>org.apache.jasper.servlet.JspServlet</servlet-class>#<servlet-class>org.apache.jasper.servlet.JspServlet</servlet-class>\n    <init-param>\n        <param-name>development</param-name>\n        <param-value>false</param-value>\n    </init-param>\n    <init-param>\n        <param-name>genStrAsCharArray</param-name>\n        <param-value>true</param-value>\n    </init-param>#' "${DOMAIN_DIR}/config/default-web.xml"
+    sed -i 's#<servlet-class>org.glassfish.wasp.servlet.JspServlet</servlet-class>#<servlet-class>org.glassfish.wasp.servlet.JspServlet</servlet-class>\n    <init-param>\n        <param-name>development</param-name>\n        <param-value>false</param-value>\n    </init-param>\n    <init-param>\n        <param-name>genStrAsCharArray</param-name>\n        <param-value>true</param-value>\n    </init-param>#' "${DOMAIN_DIR}/config/default-web.xml"
# Cleanup old CA certificates to avoid unnecessary log clutter during startup
${SCRIPT_DIR}/removeExpiredCaCerts.sh
# Delete generated files
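Because ``JAVA_TOOL_OPTIONS`` is honored by every JVM launched in the container, the workaround above can be verified from any image built on this base; the image tag below is a placeholder:

.. code-block:: bash

   # The JVM prints "Picked up JAVA_TOOL_OPTIONS: ..." on stderr when the env var is in effect
   docker run --rm <your-base-image-tag> java -version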
diff --git a/modules/container-base/src/main/docker/scripts/removeExpiredCaCerts.sh b/modules/container-base/src/main/docker/scripts/removeExpiredCaCerts.sh
index 205a9eda5d7..c019c09130e 100644
--- a/modules/container-base/src/main/docker/scripts/removeExpiredCaCerts.sh
+++ b/modules/container-base/src/main/docker/scripts/removeExpiredCaCerts.sh
@@ -8,6 +8,14 @@
set -euo pipefail
KEYSTORE="${DOMAIN_DIR}/config/cacerts.jks"
+if [ ! -r "${KEYSTORE}" ]; then
+ KEYSTORE="${DOMAIN_DIR}/config/cacerts.p12"
+ if [ ! -r "${KEYSTORE}" ]; then
+ echo "Could not find CA certs keystore"
+ exit 1
+ fi
+fi
+
keytool -list -v -keystore "${KEYSTORE}" -storepass changeit 2>/dev/null | \
grep -i 'alias\|until' > aliases.txt
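The new fallback matches newer Payara/JDK images, where the CA bundle ships as PKCS12 (``cacerts.p12``) rather than JKS. To inspect whichever store is present (run inside the container; default Java truststore password shown):

.. code-block:: bash

   # List the first few entries of the PKCS12 CA store
   keytool -list -keystore "${DOMAIN_DIR}/config/cacerts.p12" -storepass changeit | head -n 5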
diff --git a/modules/dataverse-parent/pom.xml b/modules/dataverse-parent/pom.xml
index 05f7874d31c..7c83ac39736 100644
--- a/modules/dataverse-parent/pom.xml
+++ b/modules/dataverse-parent/pom.xml
@@ -148,7 +148,7 @@
-Duser.timezone=${project.timezone} -Dfile.encoding=${project.build.sourceEncoding} -Duser.language=${project.language} -Duser.region=${project.region}
-        <payara.version>5.2022.3</payara.version>
+        <payara.version>6.2023.7</payara.version>
         <postgresql.version>42.5.1</postgresql.version>
         <solr.version>8.11.1</solr.version>
         <aws.version>1.12.290</aws.version>
@@ -374,10 +374,20 @@
             <name>Local repository for hosting jars not available from network repositories.</name>
             <url>file://${project.basedir}/local_lib</url>
-        <!--
         <repository>
             <id>oss-sonatype</id>
             <name>oss-sonatype</name>
+            <url>
+                https://oss.sonatype.org/content/repositories/snapshots/
+            </url>
+            <snapshots>
+                <enabled>true</enabled>
+            </snapshots>
+        </repository>
+        <repository>
+            <id>s01-oss-sonatype</id>
+            <name>s01-oss-sonatype</name>
             <url>
                 https://s01.oss.sonatype.org/content/repositories/snapshots/
@@ -385,7 +395,7 @@
                 <enabled>true</enabled>
-        -->
+
@@ -393,11 +403,9 @@
             <id>ct</id>
-                <payara.version>5.2022.5</payara.version>
+
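To confirm which Payara version the build will actually resolve after edits like the above, the maven-help-plugin can print the effective property (run from ``modules/dataverse-parent``; ``-DforceStdout`` requires maven-help-plugin 3.1+):

.. code-block:: bash

   # Prints the effective payara.version property
   mvn -q help:evaluate -Dexpression=payara.version -DforceStdout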
diff --git a/modules/dataverse-spi/pom.xml b/modules/dataverse-spi/pom.xml
index 6235d309e89..b00053fe5e0 100644
--- a/modules/dataverse-spi/pom.xml
+++ b/modules/dataverse-spi/pom.xml
@@ -13,7 +13,7 @@
     <groupId>io.gdcc</groupId>
     <artifactId>dataverse-spi</artifactId>
-    <version>1.0.0${project.version.suffix}</version>
+    <version>2.0.0${project.version.suffix}</version>
     <packaging>jar</packaging>
     <name>Dataverse SPI Plugin API</name>
diff --git a/modules/dataverse-spi/src/main/java/io/gdcc/spi/export/ExportDataProvider.java b/modules/dataverse-spi/src/main/java/io/gdcc/spi/export/ExportDataProvider.java
index 228992c8288..d039ac39e8f 100644
--- a/modules/dataverse-spi/src/main/java/io/gdcc/spi/export/ExportDataProvider.java
+++ b/modules/dataverse-spi/src/main/java/io/gdcc/spi/export/ExportDataProvider.java
@@ -3,8 +3,8 @@
import java.io.InputStream;
import java.util.Optional;
-import javax.json.JsonArray;
-import javax.json.JsonObject;
+import jakarta.json.JsonArray;
+import jakarta.json.JsonObject;
/**
* Provides all the metadata Dataverse has about a given dataset that can then
diff --git a/modules/dataverse-spi/src/main/java/io/gdcc/spi/export/XMLExporter.java b/modules/dataverse-spi/src/main/java/io/gdcc/spi/export/XMLExporter.java
index 9afe7ba1cfd..3c3fa35c69d 100644
--- a/modules/dataverse-spi/src/main/java/io/gdcc/spi/export/XMLExporter.java
+++ b/modules/dataverse-spi/src/main/java/io/gdcc/spi/export/XMLExporter.java
@@ -1,6 +1,6 @@
package io.gdcc.spi.export;
-import javax.ws.rs.core.MediaType;
+import jakarta.ws.rs.core.MediaType;
/**
* XML Exporter is an extension of the base Exporter interface that adds the
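The two SPI files above illustrate the mechanical ``javax`` to ``jakarta`` namespace migration; a sweep like the following (package list illustrative) helps confirm nothing was missed:

.. code-block:: bash

   # Find lingering javax imports that should have moved to jakarta
   grep -rnE --include='*.java' 'import javax\.(json|ws\.rs|mail)' src/ modules/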
diff --git a/pom.xml b/pom.xml
index 96f598af0f5..df8daa7e019 100644
--- a/pom.xml
+++ b/pom.xml
@@ -96,7 +96,7 @@
             <groupId>io.gdcc</groupId>
             <artifactId>sword2-server</artifactId>
-            <version>1.2.1</version>
+            <version>2.0.0-SNAPSHOT</version>
@@ -119,7 +119,7 @@
             <groupId>com.apicatalog</groupId>
             <artifactId>titanium-json-ld</artifactId>
-            <version>1.3.0-SNAPSHOT</version>
+            <version>1.3.2</version>
             <groupId>com.google.code.gson</groupId>
@@ -160,12 +160,20 @@
             <artifactId>flyway-core</artifactId>
             <version>${flyway.version}</version>
+
+        <dependency>
+            <groupId>org.eclipse.persistence</groupId>
+            <artifactId>org.eclipse.persistence.jpa</artifactId>
+            <scope>provided</scope>
+        </dependency>
             <groupId>com.google.guava</groupId>
             <artifactId>guava</artifactId>
             <version>29.0-jre</version>
             <type>jar</type>
+
+
             <groupId>org.eclipse.microprofile.config</groupId>
             <artifactId>microprofile-config-api</artifactId>
@@ -174,13 +182,14 @@
             <groupId>jakarta.platform</groupId>
             <artifactId>jakarta.jakartaee-api</artifactId>
-            <version>${jakartaee-api.version}</version>
             <scope>provided</scope>
-
+
+
+
-            <groupId>org.glassfish</groupId>
-            <artifactId>jakarta.json</artifactId>
+            <groupId>org.eclipse.angus</groupId>
+            <artifactId>angus-activation</artifactId>
             <scope>provided</scope>
@@ -188,12 +197,20 @@
             <groupId>fish.payara.api</groupId>
             <artifactId>payara-api</artifactId>
             <scope>provided</scope>
+
+            <version>${payara.version}</version>
+
+
+
-            <groupId>com.sun.mail</groupId>
-            <artifactId>jakarta.mail</artifactId>
+            <groupId>org.eclipse.parsson</groupId>
+            <artifactId>jakarta.json</artifactId>
             <scope>provided</scope>
+
+
             <groupId>org.glassfish</groupId>
             <artifactId>jakarta.faces</artifactId>
@@ -203,6 +220,7 @@
             <groupId>org.primefaces</groupId>
             <artifactId>primefaces</artifactId>
             <version>11.0.0</version>
+            <classifier>jakarta</classifier>
             <groupId>org.primefaces.themes</groupId>
@@ -212,9 +230,10 @@
             <groupId>org.omnifaces</groupId>
             <artifactId>omnifaces</artifactId>
-            <version>3.8</version>
+            <version>4.0-M13</version>
+
             <groupId>jakarta.validation</groupId>
             <artifactId>jakarta.validation-api</artifactId>
@@ -225,9 +244,12 @@
             <artifactId>hibernate-validator</artifactId>
             <scope>provided</scope>
+
+
+
-            <groupId>org.glassfish</groupId>
-            <artifactId>jakarta.el</artifactId>
+            <groupId>org.glassfish.expressly</groupId>
+            <artifactId>expressly</artifactId>
             <scope>provided</scope>
@@ -343,12 +365,12 @@
             <groupId>org.ocpsoft.rewrite</groupId>
             <artifactId>rewrite-servlet</artifactId>
-            <version>3.5.0.Final</version>
+            <version>6.0.0-SNAPSHOT</version>
             <groupId>org.ocpsoft.rewrite</groupId>
             <artifactId>rewrite-config-prettyfaces</artifactId>
-            <version>3.5.0.Final</version>
+            <version>6.0.0-SNAPSHOT</version>
             <groupId>edu.ucsb.nceas</groupId>
@@ -394,7 +416,7 @@
             <artifactId>oauth2-oidc-sdk</artifactId>
             <version>10.7.1</version>
-
+
             <groupId>io.gdcc</groupId>
             <artifactId>xoai-data-provider</artifactId>
@@ -415,15 +437,13 @@
-            <groupId>org.glassfish.jersey.containers</groupId>
-            <artifactId>jersey-container-servlet</artifactId>
-            <version>2.23.2</version>
+            <groupId>org.glassfish.jersey.core</groupId>
+            <artifactId>jersey-server</artifactId>
             <groupId>org.glassfish.jersey.media</groupId>
             <artifactId>jersey-media-multipart</artifactId>
-            <version>2.23.2</version>
             <groupId>com.mashape.unirest</groupId>
io.gdccdataverse-spi
- 1.0.0
+ 2.0.0
diff --git a/scripts/dev/dev-rebuild.sh b/scripts/dev/dev-rebuild.sh
index 71857b14068..9eae195b135 100755
--- a/scripts/dev/dev-rebuild.sh
+++ b/scripts/dev/dev-rebuild.sh
@@ -1,8 +1,9 @@
#!/bin/sh
-PAYARA_DIR=/usr/local/payara5
+PAYARA_DIR=/usr/local/payara6
ASADMIN=$PAYARA_DIR/glassfish/bin/asadmin
DB_NAME=dvndb
DB_USER=dvnapp
+export PGPASSWORD=secret
echo "Checking if there is a war file to undeploy..."
LIST_APP=$($ASADMIN list-applications -t)
@@ -23,7 +24,7 @@ echo "Deleting ALL DATA FILES uploaded to Dataverse..."
rm -rf $PAYARA_DIR/glassfish/domains/domain1/files
echo "Terminating database sessions so we can drop the database..."
-psql -U postgres -c "
+psql -h localhost -U postgres -c "
SELECT pg_terminate_backend(pg_stat_activity.pid)
FROM pg_stat_activity
WHERE pg_stat_activity.datname = '$DB_NAME'
@@ -31,14 +32,14 @@ WHERE pg_stat_activity.datname = '$DB_NAME'
" template1
echo "Dropping the database..."
-psql -U $DB_USER -c "DROP DATABASE \"$DB_NAME\"" template1
+psql -h localhost -U $DB_USER -c "DROP DATABASE \"$DB_NAME\"" template1
echo $?
echo "Clearing out data from Solr..."
-curl http://localhost:8983/solr/collection1/update/json?commit=true -H "Content-type: application/json" -X POST -d "{\"delete\": { \"query\":\"*:*\"}}"
+curl "http://localhost:8983/solr/collection1/update/json?commit=true" -H "Content-type: application/json" -X POST -d "{\"delete\": { \"query\":\"*:*\"}}"
echo "Creating a new database..."
-psql -U $DB_USER -c "CREATE DATABASE \"$DB_NAME\" WITH OWNER = \"$DB_USER\"" template1
+psql -h localhost -U $DB_USER -c "CREATE DATABASE \"$DB_NAME\" WITH OWNER = \"$DB_USER\"" template1
echo $?
echo "Starting app server..."
@@ -53,7 +54,7 @@ cd scripts/api
cd ../..
echo "Creating SQL sequence..."
-psql -U $DB_USER $DB_NAME -f doc/sphinx-guides/source/_static/util/createsequence.sql
+psql -h localhost -U $DB_USER $DB_NAME -f doc/sphinx-guides/source/_static/util/createsequence.sql
echo "Setting DOI provider to \"FAKE\"..."
curl http://localhost:8080/api/admin/settings/:DoiProvider -X PUT -d FAKE
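The ``-h localhost`` and ``PGPASSWORD`` changes assume PostgreSQL now accepts TCP connections (e.g., from the containerized database); a one-liner to verify with the same credentials the script exports:

.. code-block:: bash

   # Fails fast if the dev database is not reachable over TCP
   PGPASSWORD=secret psql -h localhost -U postgres -c 'SELECT version();'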
diff --git a/scripts/installer/Makefile b/scripts/installer/Makefile
index d40d4d792ea..a1fbfab782e 100644
--- a/scripts/installer/Makefile
+++ b/scripts/installer/Makefile
@@ -7,7 +7,6 @@ JHOVE_SCHEMA=${INSTALLER_ZIP_DIR}/jhoveConfig.xsd
SOLR_SCHEMA=${INSTALLER_ZIP_DIR}/schema.xml ${INSTALLER_ZIP_DIR}/update-fields.sh
SOLR_CONFIG=${INSTALLER_ZIP_DIR}/solrconfig.xml
PYTHON_FILES=${INSTALLER_ZIP_DIR}/README_python.txt ${INSTALLER_ZIP_DIR}/installConfig.py ${INSTALLER_ZIP_DIR}/installUtils.py ${INSTALLER_ZIP_DIR}/install.py ${INSTALLER_ZIP_DIR}/installAppServer.py ${INSTALLER_ZIP_DIR}/requirements.txt ${INSTALLER_ZIP_DIR}/default.config ${INSTALLER_ZIP_DIR}/interactive.config
-INSTALL_SCRIPT=${INSTALLER_ZIP_DIR}/install
installer: dvinstall.zip
diff --git a/scripts/installer/README.txt b/scripts/installer/README.txt
index 350a17fc00c..c3ed8211082 100644
--- a/scripts/installer/README.txt
+++ b/scripts/installer/README.txt
@@ -1,42 +1 @@
-The installer script (install) can be run either by a developer (inside the source tree), or by an end-user installing the Dataverse. The latter will obtain the script as part of the distribution bundle; and they will be running it inside the unzipped bundle directory.
-
-In the former (developer) case, the installer will be looking for the files it needs in the other directories in the source tree.
-For example, the war file (once built) can be found in ../../target/. The name of the war file will be dataverse-{VERSION}.war, where
-{VERSION} is the version number of the Dataverse, obtained from the pom file (../../pom.xml). For example, as of writing this README.txt (July 2015) the war file is ../../target/dataverse-4.1.war/
-
-When building a distribution archive, the Makefile will pile all the files that the installer needs in one directory (./dvinstall here) and then zip it up. We upload the resulting zip bundle on github as the actual software release. This way the end user only gets the files they actually need to install the Dataverse app. So they can do so without pulling the entire source tree.
-
-
-The installer script itself (the perl script ./install) knows to look for all these files in 2 places (for example, it will look for the war file in ../../target/; if it's not there, it'll assume this is a distribution bundle and look for it as ./dataverse.war)
-
-Here's the list of the files that the installer needs:
-
-the war file:
-target/dataverse-{VERSION}.war
-
-and also:
-
-from scripts/installer (this directory):
-
-install
-glassfish-setup.sh
-
-from scripts/api:
-
-setup-all.sh
-setup-builtin-roles.sh
-setup-datasetfields.sh
-setup-dvs.sh
-setup-identity-providers.sh
-setup-users.sh
-data (the entire directory with all its contents)
-
-from conf/jhove:
-
-jhove.conf
-
-SOLR schema and config files, from conf/solr/8.11.1:
-
-schema.xml
-schema_dv_mdb_fields.xml
-solrconfig.xml
+See README_python.txt
diff --git a/scripts/installer/as-setup.sh b/scripts/installer/as-setup.sh
index 49ebce059d2..8d0cbe60cf4 100755
--- a/scripts/installer/as-setup.sh
+++ b/scripts/installer/as-setup.sh
@@ -56,15 +56,15 @@ function preliminary_setup()
# avoid OutOfMemoryError: PermGen per http://eugenedvorkin.com/java-lang-outofmemoryerror-permgen-space-error-during-deployment-to-glassfish/
#./asadmin $ASADMIN_OPTS list-jvm-options
- # Note that these JVM options are different for Payara5 and Glassfish4:
+ # Note that these JVM options are different for Payara and Glassfish4:
# old Glassfish4 options: (commented out)
#./asadmin $ASADMIN_OPTS delete-jvm-options "-XX\:MaxPermSize=192m"
#./asadmin $ASADMIN_OPTS create-jvm-options "-XX\:MaxPermSize=512m"
#./asadmin $ASADMIN_OPTS create-jvm-options "-XX\:PermSize=256m"
- # payara5 ships with the "-server" option already in domain.xml, so no need:
+ # Payara ships with the "-server" option already in domain.xml, so no need:
#./asadmin $ASADMIN_OPTS delete-jvm-options -client
- # new Payara5 options: (thanks to donsizemore@unc.edu)
+ # new Payara options: (thanks to donsizemore@unc.edu)
./asadmin $ASADMIN_OPTS create-jvm-options "-XX\:MaxMetaspaceSize=512m"
./asadmin $ASADMIN_OPTS create-jvm-options "-XX\:MetaspaceSize=256m"
./asadmin $ASADMIN_OPTS create-jvm-options "-Dfish.payara.classloading.delegate=false"
@@ -155,18 +155,18 @@ function final_setup(){
if [ "$DOCKER_BUILD" = "true" ]
then
- FILES_DIR="/usr/local/payara5/glassfish/domains/domain1/files"
+ FILES_DIR="/usr/local/payara6/glassfish/domains/domain1/files"
RSERVE_HOST="localhost"
RSERVE_PORT="6311"
RSERVE_USER="rserve"
RSERVE_PASS="rserve"
HOST_ADDRESS="localhost\:8080"
- pushd /usr/local/payara5/glassfish/bin/
+ pushd /usr/local/payara6/glassfish/bin/
./asadmin start-domain domain1
preliminary_setup
- chmod -R 777 /usr/local/payara5/
- rm -rf /usr/local/payara5/glassfish/domains/domain1/generated
- rm -rf /usr/local/payara5/glassfish/domains/domain1/applications
+ chmod -R 777 /usr/local/payara6/
+ rm -rf /usr/local/payara6/glassfish/domains/domain1/generated
+ rm -rf /usr/local/payara6/glassfish/domains/domain1/applications
popd
exit 0
fi
@@ -276,7 +276,7 @@ if [ ! -d "$DOMAIN_DIR" ]
exit 2
fi
-echo "Setting up your app. server (Payara5) to support Dataverse"
+echo "Setting up your app. server (Payara) to support Dataverse"
echo "Payara directory: "$GLASSFISH_ROOT
echo "Domain directory: "$DOMAIN_DIR
diff --git a/scripts/installer/default.config b/scripts/installer/default.config
index 312dd2cb2d8..8647cd02416 100644
--- a/scripts/installer/default.config
+++ b/scripts/installer/default.config
@@ -1,7 +1,7 @@
[glassfish]
HOST_DNS_ADDRESS = localhost
GLASSFISH_USER = dataverse
-GLASSFISH_DIRECTORY = /usr/local/payara5
+GLASSFISH_DIRECTORY = /usr/local/payara6
GLASSFISH_ADMIN_USER = admin
GLASSFISH_ADMIN_PASSWORD = secret
GLASSFISH_HEAP = 2048
diff --git a/scripts/installer/install b/scripts/installer/install
deleted file mode 100755
index 2208f014606..00000000000
--- a/scripts/installer/install
+++ /dev/null
@@ -1,1538 +0,0 @@
-#!/usr/bin/perl
-
-use strict;
-use warnings;
-use Getopt::Long;
-use Socket;
-use File::Copy;
-
-# command line options:
-
-my $verbose;
-my $postgresonly;
-my $hostname;
-my $gfuser;
-my $gfdir;
-my $mailserver;
-my $noninteractive;
-my $skipdatabasesetup;
-my $force;
-my $nogfpasswd;
-my $admin_email;
-
-my ($rez) = GetOptions(
- #"length=i" => \$length, # numeric
- #"file=s" => \$data, # string
- "verbose" => \$verbose,
- "pg_only" => \$postgresonly,
- "skip_db_setup" => \$skipdatabasesetup,
- "hostname=s" => \$hostname,
- "gfuser=s" => \$gfuser,
- "gfdir=s" => \$gfdir,
- "mailserver=s" => \$mailserver,
- "y|yes" => \$noninteractive,
- "f|force" => \$force,
- "nogfpasswd" => \$nogfpasswd,
- "admin_email=s" => \$admin_email,
-);
-
-# openshift/docker-specific - name of the "pod" executing the installer:
-my $pod_name = "";
-if (exists($ENV{'MY_POD_NAME'}))
-{
- $pod_name = $ENV{'MY_POD_NAME'};
-}
-
-my $jq_exec_path = "";
-my $psql_exec_path = "";
-my $cwd;
-my $WARFILE_LOCATION = "dataverse.war";
-
-
-my @CONFIG_VARIABLES;
-
-if ($postgresonly)
-{
- @CONFIG_VARIABLES =
- ( 'POSTGRES_SERVER', 'POSTGRES_PORT', 'POSTGRES_DATABASE', 'POSTGRES_USER', 'POSTGRES_PASSWORD', 'POSTGRES_ADMIN_PASSWORD' );
-
-}
-else
-{
-
- @CONFIG_VARIABLES = (
- 'HOST_DNS_ADDRESS',
- 'GLASSFISH_USER',
- 'GLASSFISH_DIRECTORY',
- 'ADMIN_EMAIL',
- 'MAIL_SERVER',
-
- 'POSTGRES_SERVER',
- 'POSTGRES_PORT',
- 'POSTGRES_ADMIN_PASSWORD',
- 'POSTGRES_DATABASE',
- 'POSTGRES_USER',
- 'POSTGRES_PASSWORD',
-
- 'SOLR_LOCATION',
-
- 'RSERVE_HOST',
- 'RSERVE_PORT',
- 'RSERVE_USER',
- 'RSERVE_PASSWORD',
-
- 'DOI_USERNAME',
- 'DOI_PASSWORD',
- 'DOI_BASEURL',
- 'DOI_DATACITERESTAPIURL'
-
- );
-}
-
-my %CONFIG_DEFAULTS;
-
-&read_config_defaults("default.config");
-
-my %CONFIG_PROMPTS;
-my %CONFIG_COMMENTS;
-
-&read_interactive_config_values("interactive.config");
-
-my $API_URL = "http://localhost:8080/api";
-
-# jodbc.postgresql.org recommends 4.2 for Java 8.
-# updated drivers may be obtained from
-# https://jdbc.postgresql.org/download.html
-my $postgres_jdbc = "postgresql-42.2.12.jar";
-
-# 0. A few preliminary checks:
-
-# 0a. OS:
-
-my $uname_out = `uname -a`;
-
-my @uname_tokens = split( " ", $uname_out );
-
-my $WORKING_OS;
-if ( $uname_tokens[0] eq "Darwin" ) {
- print "\nThis appears to be a MacOS X system; good.\n";
- # TODO: check the OS version
-
- $WORKING_OS = "MacOSX";
-}
-elsif ( $uname_tokens[0] eq "Linux" ) {
- if ( -f "/etc/redhat-release" ) {
- print "\nThis appears to be a RedHat system; good.\n";
- $WORKING_OS = "RedHat";
- # TODO: check the distro version
- }
- else {
- print "\nThis appears to be a non-RedHat Linux system;\n";
- print "this installation *may* succeed; but we're not making any promises!\n";
- $WORKING_OS = "Linux";
- }
-} else {
- print "\nWARNING: This appears to be neither a Linux or MacOS X system!\n";
- print "This installer script will most likely fail. Please refer to the\n";
- print "DVN Installers Guide for more information.\n\n";
-
- $WORKING_OS = "Unknown";
-
- unless ($noninteractive) {
- exit 0;
- }
-
- print "(Normally we would stop right there; but since the \"--yes\" option was specified, we'll attempt to continue)\n\n";
-
-}
-
-
-# 0b. host name:
-
-if ($hostname) {
- $CONFIG_DEFAULTS{'HOST_DNS_ADDRESS'} = $hostname;
-} else {
- my $hostname_from_cmdline = `hostname`;
- chop $hostname_from_cmdline;
-
- $CONFIG_DEFAULTS{'HOST_DNS_ADDRESS'} = $hostname_from_cmdline;
-}
-
-# 0c. check if there is the default.config file with the pre-set configuration values:
-
-# read default configuration values from tab separated file "default.config" if it exists
-# moved after the $hostname_from_cmdline section to avoid excessively complicating the logic
-# of command line argument, automatic selection, or config file.
-#
-# NOTE: if the file contain any Postgres configuration (for example: "POSTGRES_USER dvnApp")
-# but an environmental variable with the same name exists - THE ENV. VAR WILL WIN! (don't ask)
-# (actually this is to accommodate the openshift docker deployment scenario)
-
-sub trim { my $s = shift; $s =~ s/^\s+|\s+$//g; return $s };
-
-#my $config_default_file = "default.config";
-#
-#if ( -e $config_default_file )
-#{
-# print("loading default configuration values from $config_default_file\n");
-# open( my $inp_cfg, $config_default_file );
-# while( my $ln = <$inp_cfg> )
-# {
-# my @xs = split('\t', $ln );
-# if ( 2 == @xs )
-# {
-# my $k = $xs[0];
-# my $v = trim($xs[1]);
-## if (defined $ENV{$k} && ($k eq "POSTGRES_USER" || $k eq "POSTGRES_PASSWORD")) {
-## $v = $ENV{$k};
-## }
-## if (defined $ENV{'POSTGRESQL_ADMIN_PASSWORD'} && $k eq "POSTGRES_ADMIN_PASSWORD") {
-## $v = $ENV{'POSTGRESQL_ADMIN_PASSWORD'};
-## }
-# $CONFIG_DEFAULTS{$k}=$v;
-# }
-# }
-#}
-#else
-#{
-# print("using hard-coded default configuration values (no $config_default_file available)\n");
-#}
-
-# 0d. current OS user. (the first one we find wins)
-
-my $current_user = $ENV{LOGNAME} || $ENV{USER} || getpwuid($<);
-
-# if the username was specified on the command-line, it takes precendence:
-if ($gfuser) {
- print "Using CLI-specified user $gfuser.\n";
- $CONFIG_DEFAULTS{'GLASSFISH_USER'} = $gfuser;
-}
-
-
-if (!$CONFIG_DEFAULTS{'GLASSFISH_USER'} || !$noninteractive) {
- $CONFIG_DEFAULTS{'GLASSFISH_USER'} = $current_user;
- print "using $current_user.\n";
-}
-
-
-# prefer that we not install as root.
-unless ( $< != 0 ) {
- print "####################################################################\n";
- print " It is recommended that this script not be run as root.\n";
- print " Consider creating the service account \"dataverse\", giving it ownership\n";
- print " on the glassfish/domains/domain1/ and glassfish/lib/ directories,\n";
- print " along with the JVM-specified files.dir location, and designate\n";
- print " that account to launch and run the Application Server (Payara),\n";
- print " AND use that user account to run this installer.\n";
- print "####################################################################\n";
-
- unless ($noninteractive)
- {
- print "\nPress any key to continue, or ctrl-C to exit the installer...\n\n";
- system "stty cbreak /dev/tty 2>&1";
- unless ($noninteractive) {
- my $key = getc(STDIN);
- }
- system "stty -cbreak /dev/tty 2>&1";
- print "\n";
- }
-}
-
-# ensure $gfuser exists or bail
-my $gfidcmd="id $CONFIG_DEFAULTS{'GLASSFISH_USER'} > /dev/null";
-my $gfreturncode=system($gfidcmd);
-if ($gfreturncode != 0) {
- die "Couldn't find user $gfuser. Please ensure the account exists and is readable by the user running this installer.\n";
-}
-
-# 0e. the following 2 options can also be specified on the command line, and
-# also take precedence over the default values that are hard-coded and/or
-# provided in the default.config file:
-
-if ($mailserver) {
- $CONFIG_DEFAULTS{'MAIL_SERVER'} = $mailserver;
-}
-
-if ($gfdir) {
- $CONFIG_DEFAULTS{'GLASSFISH_DIRECTORY'} = $gfdir;
-}
-
-# 1. CHECK FOR SOME MANDATORY COMPONENTS (WAR FILE, ETC.)
-# since we can't do anything without these things in place, better check for
-# them before we go into the interactive config mode.
-# (skip if this is a database-only setup)
-
-unless ($postgresonly)
-{
-# 1a. war file:
- print "\nChecking if the application .war file is available... ";
-
-# if this installer section is running out of the installer zip bundle directory,
-# the war file will be sitting right here, named "dataverse.war":
-
- $WARFILE_LOCATION = "dataverse.war";
-
-# but if it's not here, this is probably a personal development
-# setup, so their build should be up in their source tree:
-
- unless ( -f $WARFILE_LOCATION ) {
- my $DATAVERSE_VERSION = "";
- my $DATAVERSE_POM_FILE = "../../modules/dataverse-parent/pom.xml";
- if ( -f $DATAVERSE_POM_FILE )
- {
- open DPF, $DATAVERSE_POM_FILE;
- my $pom_line;
- while ($pom_line=<DPF>)
- {
- chop $pom_line;
- if ($pom_line =~/^[ \t]*<revision>([0-9\.]+)<\/revision>/)
- {
- $DATAVERSE_VERSION=$1;
- last;
- }
- }
- close DPF;
-
- if ($DATAVERSE_VERSION ne "") {
- $WARFILE_LOCATION = "../../target/dataverse-" . $DATAVERSE_VERSION . ".war";
- }
- }
- }
-
-# But, if the war file cannot be found in either of the 2
-# places - we'll just have to give up:
-
- unless ( -f $WARFILE_LOCATION ) {
- print "\nWARNING: Can't find the project .war file!\n";
- print "\tAre you running the installer in the right directory?\n";
- print "\tHave you built the war file?\n";
- print "\t(if not, build the project and run the installer again)\n";
-
- exit 0;
- }
- print " Yes, it is!\n";
-
-
-# 1b. check and remember the working dir:
- chomp( $cwd = `pwd` );
-
-# 1d. jq executable:
-
- my $sys_path = $ENV{'PATH'};
- my @sys_path_dirs = split( ":", $sys_path );
-
- if ( $pod_name ne "start-glassfish") # Why is that again?
- {
- for my $sys_path_dir (@sys_path_dirs) {
- if ( -x $sys_path_dir . "/jq" ) {
- $jq_exec_path = $sys_path_dir;
- last;
- }
- }
- if ( $jq_exec_path eq "" ) {
- print STDERR "\nERROR: I haven't been able to find the jq command in your PATH! Please install it from http://stedolan.github.io/jq/\n";
- exit 1;
-
- }
- }
-
-}
-
-
-# 2. INTERACTIVE CONFIG SECTION:
-
-print "\nWelcome to the Dataverse installer.\n";
-unless ($postgresonly) {
- print "You will be guided through the process of setting up a NEW\n";
- print "instance of the dataverse application\n";
-}
-else {
- print "You will be guided through the process of configuring your\n";
- print "PostgreSQL database for use by the Dataverse application.\n";
-}
-
-my $yesno;
-
-unless ($noninteractive)
-{
- print "\nATTENTION: As of Dataverse v.4.19, we are offering a new, experimental \n";
- print "version of the installer script, implemented in Python. It will eventually \n";
- print "replace this script (implemented in Perl). Consult the file README_python.txt \n";
- print "for more information on how to run it. \n";
-
- print "\nWould you like to exit and use the new installer instead? [y/n] ";
- $yesno = <>;
- chop $yesno;
-
- while ( $yesno ne "y" && $yesno ne "n" ) {
- print "Please enter 'y' or 'n'!\n";
- print "(or ctrl-C to exit the installer)\n";
- $yesno = <>;
- chop $yesno;
- }
-
- exit 0 if $yesno eq "y";
-}
-
-ENTERCONFIG:
-
-print "\n";
-print "Please enter the following configuration values:\n";
-print "(hit [RETURN] to accept the default value)\n";
-print "\n";
-
-for my $ENTRY (@CONFIG_VARIABLES)
-{
- my $config_prompt = $CONFIG_PROMPTS{$ENTRY};
- my $config_comment = $CONFIG_COMMENTS{$ENTRY};
-
- if ( $config_comment eq '' )
- {
- print $config_prompt . ": ";
- print "[" . $CONFIG_DEFAULTS{$ENTRY} . "] ";
- }
- else
- {
- print $config_prompt . $config_comment;
- print "[" . $CONFIG_DEFAULTS{$ENTRY} . "] ";
- }
-
- my $user_entry = "";
-
- # ($noninteractive means the installer is being run in the non-interactive mode; it will use
- # the default values specified so far, without prompting the user for alternative values)\
- unless ($noninteractive)
- {
- $user_entry = <>;
- chop $user_entry;
-
- if ( $user_entry ne "" ) {
- $CONFIG_DEFAULTS{$ENTRY} = $user_entry;
- }
-
- # for some values, we'll try to do some validation right here, in real time:
-
- if ($ENTRY eq 'ADMIN_EMAIL')
- {
- $user_entry = $CONFIG_DEFAULTS{$ENTRY};
- my $attempts = 0;
- while ($user_entry !~/[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}/)
- {
- $attempts++;
- print "Please enter a valid email address: ";
- $user_entry = <>;
- chop $user_entry;
- }
-
- if ($attempts)
- {
- print "OK, looks legit.\n";
- $CONFIG_DEFAULTS{$ENTRY} = $user_entry;
- }
- }
- elsif ($ENTRY eq 'GLASSFISH_DIRECTORY')
- {
- # CHECK IF GLASSFISH DIR LOOKS OK:
- print "\nChecking your Glassfish installation...";
-
- my $g_dir = $CONFIG_DEFAULTS{'GLASSFISH_DIRECTORY'};
-
-
- unless ( -d $g_dir . "/glassfish/domains/domain1" )
- {
- while ( !( -d $g_dir . "/glassfish/domains/domain1" ) )
- {
- print "\nInvalid Glassfish directory " . $g_dir . "!\n";
- print "Enter the root directory of your Glassfish installation:\n";
- print "(Or ctrl-C to exit the installer): ";
-
- $g_dir = <>;
- chop $g_dir;
- }
- }
-
- # verify that we can write in the Glassfish directory
- # (now that we are no longer requiring to run the installer as root)
-
- my @g_testdirs = ( "/glassfish/domains/domain1",
- "/glassfish/domains/domain1/config",
- "/glassfish/lib");
-
- for my $test_dir (@g_testdirs)
- {
- if (!(-w ($g_dir . $test_dir)))
- {
- print "\n";
- die("ERROR: " . $g_dir . $test_dir . " not writable to the user running the installer! Check permissions on Payara5 hierarchy.\n");
- }
- }
-
-
-
- print "$g_dir looks OK!\n";
- $CONFIG_DEFAULTS{'GLASSFISH_DIRECTORY'} = $g_dir;
-
- }
- elsif ($ENTRY eq 'MAIL_SERVER')
- {
- my $smtp_server = "";
- while (! &validate_smtp_server() )
- {
- print "Enter a valid SMTP (mail) server:\n";
- print "(Or ctrl-C to exit the installer): ";
-
- $smtp_server = <>;
- chop $smtp_server;
-
- $CONFIG_DEFAULTS{'MAIL_SERVER'} = $smtp_server unless $smtp_server eq '';
- }
-
- print "\nOK, we were able to establish connection to the SMTP server you have specified.\n";
- print "Please note that you *may* need to configure some extra settings before your \n";
- print "Dataverse can send email. Please consult the \"Mail Host Configuration & Authentication\"\n";
- print "section of the installation guide (http://guides.dataverse.org/en/latest/installation/installation-main.html)\n";
- print "for more information.\n";
- }
- }
-
- print "\n";
-}
-
-# 2b. CONFIRM VALUES ENTERED:
-
-print "\nOK, please confirm what you've entered:\n\n";
-
-for my $ENTRY (@CONFIG_VARIABLES) {
- print $CONFIG_PROMPTS{$ENTRY} . ": " . $CONFIG_DEFAULTS{$ENTRY} . "\n";
-}
-
-if ($noninteractive) {
- $yesno = "y";
-}
-else {
- print "\nIs this correct? [y/n] ";
- $yesno = <>;
- chop $yesno;
-}
-
-while ( $yesno ne "y" && $yesno ne "n" ) {
- print "Please enter 'y' or 'n'!\n";
- print "(or ctrl-C to exit the installer)\n";
- $yesno = <>;
- chop $yesno;
-}
-
-if ( $yesno eq "n" ) {
- goto ENTERCONFIG;
-}
-
-# 3. SET UP POSTGRES USER AND DATABASE
-
-unless($pod_name eq "start-glassfish" || $pod_name eq "dataverse-glassfish-0" || $skipdatabasesetup) {
- &setup_postgres();
-# (there's no return code - if anything goes wrong, the method will exit the script, with some diagnostic messages for the user)
- print "\nOK, done.\n";
-
- if ($postgresonly)
- {
- exit 0;
- }
-}
-
-# 5. CONFIGURE PAYARA
-
-my $glassfish_dir = $CONFIG_DEFAULTS{'GLASSFISH_DIRECTORY'};
-
-my $done = &setup_appserver();
-
-# Check if the App is running:
-
-unless ((
- my $exit_code =
- system( $glassfish_dir . "/bin/asadmin list-applications | grep -q '^dataverse'" )
- ) == 0 )
-{
- # If the "asadmin list-applications" has failed, it may only mean that an earlier
- # "asadmin login" had failed, and asadmin is now failing to run without the user
- # supplying the username and password. (And the fact that we are trying to pipe the
- # output to grep prevents it from providing the prompts).
- # So before we give up, we'll try an alternative:
-
- unless ((
- my $exit_code_2 =
- system( "curl http://localhost:8080/robots.txt | grep -q '^User-agent'" )
- ) == 0 )
- {
- print STDERR "It appears that the Dataverse application is not running...\n";
- print STDERR "Even though the \"asadmin deploy\" command had succeeded earlier.\n\n";
- print STDERR "Aborting - sorry...\n\n";
-
- exit 1;
- }
-}
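Since "asadmin list-applications" can fail for reasons unrelated to the deployment (a stale "asadmin login", for instance), the check falls back to fetching robots.txt from the application itself. A minimal Python sketch of the same two-stage probe; the asadmin path and URL are illustrative defaults:

```python
import subprocess
import urllib.request

def dataverse_running(asadmin="/usr/local/payara6/bin/asadmin",
                      url="http://localhost:8080/robots.txt"):
    """Two-stage health check: ask the app server first, then probe the
    application over HTTP as a fallback."""
    listed = subprocess.run(
        f"{asadmin} list-applications | grep -q '^dataverse'",
        shell=True).returncode == 0
    if listed:
        return True
    try:
        # A 200 from the app itself counts as "running" even if asadmin
        # refused to talk to us.
        with urllib.request.urlopen(url, timeout=10) as resp:
            return resp.status == 200
    except OSError:
        return False
```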
-
-
-print "\nOK, the Dataverse application appears to be running...\n\n";
-
-# Run the additional setup scripts, that populate the metadata block field values, create users
-# and dataverses, etc.
-
-unless ( -d "data" && -f "setup-datasetfields.sh" && -f "setup-users.sh" && -f "setup-dvs.sh" && -f "setup-all.sh" ) {
- chdir("../api");
-}
-
-unless ( -d "data" && -f "setup-datasetfields.sh" && -f "setup-users.sh" && -f "setup-dvs.sh" && -f "setup-builtin-roles.sh" && -f "setup-all.sh" ) {
- print "\nERROR: Can't find the metadata and user/dataverse setup scripts!\n";
- print "\tAre you running the installer in the right directory?\n";
- exit 1;
-}
-
-# if there's an admin_email set from arguments, replace the value in `dv-root.json` (called by `setup-all.sh`)
-if ($admin_email)
-{
- print "setting contact email for root dataverse to: $admin_email\n";
- set_root_contact_email( $admin_email );
-}
-else
-{
- print "using default contact email for root dataverse\n";
-}
-
-for my $script ( "setup-all.sh" ) {
- # (there's only 1 setup script to run now - it runs all the other required scripts)
- print "Executing post-deployment setup script " . $script . "... ";
-
- my $my_hostname = $CONFIG_DEFAULTS{'HOST_DNS_ADDRESS'};
-
- # We used to filter the supplied scripts, replacing "localhost" and the port, in
- # case they are running Dataverse on a different port... Now we are simply requiring
- # that the port 8080 is still configured in domain.xml when they are running the
- # installer:
- my $run_script;
- #if ( $my_hostname ne "localhost" ) {
- # system( "sed 's/localhost:8080/$my_hostname/g' < " . $script . " > tmpscript.sh; chmod +x tmpscript.sh" );
- # $run_script = "tmpscript.sh";
- #}
- #else {
- $run_script = $script;
- #}
-
- unless ( my $exit_code = system( "./" . $run_script . " > $run_script.$$.log 2>&1") == 0 )
- {
- print "\nERROR executing script " . $script . "!\n";
- exit 1;
- }
- print "done!\n";
-}
-
-# SOME ADDITIONAL SETTINGS THAT ARE NOT TAKEN CARE OF BY THE setup-all SCRIPT
-# NEED TO BE CONFIGURED HERE:
-
-print "Making additional configuration changes...\n\n";
-
-
-# a. Configure the Admin email in the Dataverse settings:
-
-print "Executing " . "curl -X PUT -d " . $CONFIG_DEFAULTS{'ADMIN_EMAIL'} . " " . $API_URL . "/admin/settings/:SystemEmail" . "\n";
-
-my $exit_code = system("curl -X PUT -d " . $CONFIG_DEFAULTS{'ADMIN_EMAIL'} . " " . $API_URL . "/admin/settings/:SystemEmail");
-if ( $exit_code )
-{
- print "WARNING: failed to configure the admin email in the Dataverse settings!\n\n";
-}
-else
-{
- print "OK.\n\n";
-}
-
-# b. If this installation is going to be using a remote SOLR search engine service, configure its location in the settings:
-
-if ($CONFIG_DEFAULTS{'SOLR_LOCATION'} ne 'LOCAL')
-{
- print "Executing " . "curl -X PUT -d " . $CONFIG_DEFAULTS{'SOLR_LOCATION'} . " " . $API_URL . "/admin/settings/:SolrHostColonPort" . "\n";
- my $exit_code = system("curl -X PUT -d " . $CONFIG_DEFAULTS{'SOLR_LOCATION'} . " " . $API_URL . "/admin/settings/:SolrHostColonPort");
- if ( $exit_code )
- {
- print "WARNING: failed to configure the location of the remote SOLR service!\n\n";
- }
- else
- {
- print "OK.\n\n";
- }
-}
-
-
-
-chdir($cwd);
-
-print "\n\nYou should now have a running Dataverse instance at\n";
-print " http://" . $CONFIG_DEFAULTS{'HOST_DNS_ADDRESS'} . ":8080\n\n\n";
-
-if ($WARFILE_LOCATION =~/([0-9]\.[0-9]\.[0-9])\.war$/)
-{
- my $version = $1;
- print "If this is a personal development installation, we recommend that you undeploy the currently-running copy \n";
- print "of the application, with the following asadmin command:\n\n";
- print "\t" . $CONFIG_DEFAULTS{'GLASSFISH_DIRECTORY'} . '/bin/asadmin undeploy dataverse-' . $version . "\n\n";
- print "before attempting to deploy from your development environment in NetBeans.\n\n";
-}
-
-
-print "\nYour Dataverse has been configured to use DataCite, to register DOI global identifiers in the \n";
-print "test name space \"10.5072\" with the \"shoulder\" \"FK2\"\n";
-print "However, you have to contact DataCite (support\@datacite.org) and request a test account, before you \n";
-print "can publish datasets. Once you receive the account name and password, add them to your domain.xml,\n";
-print "as the following two JVM options:\n";
-print "\t-Ddoi.username=...\n";
-print "\t-Ddoi.password=...\n";
-print "and restart payara5\n";
-print "If this is a production Dataverse and you are planning to register datasets as \n";
-print "\"real\", non-test DOIs or Handles, consult the \"Persistent Identifiers and Publishing Datasets\"\n";
-print "section of the Installataion guide, on how to configure your Dataverse with the proper registration\n";
-print "credentials.\n\n";
-
-
-
-# (going to skip the Rserve check; it's no longer a required, or even a recommended component)
-
-exit 0;
-
-# 9. FINALLY, CHECK IF RSERVE IS RUNNING:
-print "\n\nFinally, checking if Rserve is running and accessible...\n";
-
-unless ( $CONFIG_DEFAULTS{'RSERVE_PORT'} =~ /^[0-9][0-9]*$/ ) {
- print $CONFIG_DEFAULTS{'RSERVE_PORT'} . " does not look like a valid port number,\n";
- print "defaulting to 6311.\n\n";
-
- $CONFIG_DEFAULTS{'RSERVE_PORT'} = 6311;
-}
-
-my ( $rserve_iaddr, $rserve_paddr, $rserve_proto );
-
-unless ( $rserve_iaddr = inet_aton( $CONFIG_DEFAULTS{'RSERVE_HOST'} ) ) {
- print STDERR "Could not look up $CONFIG_DEFAULTS{'RSERVE_HOST'},\n";
- print STDERR "the host you specified as your R server.\n";
- print STDERR "\nDVN can function without a working R server, but\n";
- print STDERR "much of the functionality concerning running statistics\n";
- print STDERR "and analysis on quantitative data will not be available.\n";
- print STDERR "Please consult the Installers guide for more info.\n";
-
- exit 0;
-}
-
-$rserve_paddr = sockaddr_in( $CONFIG_DEFAULTS{'RSERVE_PORT'}, $rserve_iaddr );
-$rserve_proto = getprotobyname('tcp');
-
-unless ( socket( SOCK, PF_INET, SOCK_STREAM, $rserve_proto )
- && connect( SOCK, $rserve_paddr ) )
-{
- print STDERR "Could not establish connection to $CONFIG_DEFAULTS{'RSERVE_HOST'}\n";
- print STDERR "on port $CONFIG_DEFAULTS{'RSERVE_PORT'}, the address you provided\n";
- print STDERR "for your R server.\n";
- print STDERR "DVN can function without a working R server, but\n";
- print STDERR "much of the functionality concerning running statistics\n";
- print STDERR "and analysis on quantitative data will not be available.\n";
- print STDERR "Please consult the \"Installing R\" section in the Installers guide\n";
- print STDERR "for more info.\n";
-
- exit 0;
-
-}
-
-close(SOCK);
-print "\nOK!\n";
-
-# 5. CONFIGURE PAYARA
-sub setup_appserver {
- my $success = 1;
- my $failure = 0;
-
- my $glassfish_dir = $CONFIG_DEFAULTS{'GLASSFISH_DIRECTORY'};
-
- print "\nProceeding with the app. server (Payara5) setup.\n";
-
-# 5a. DETERMINE HOW MUCH MEMORY TO GIVE TO GLASSFISH AS HEAP:
-
- my $gf_heap_default = "2048m";
- my $sys_mem_total = 0;
-
- if ( -e "/proc/meminfo" && open MEMINFO, "/proc/meminfo" ) {
- # Linux
-
- while ( my $mline = <MEMINFO> ) {
- if ( $mline =~ /MemTotal:[ \t]*([0-9]*) kB/ ) {
- $sys_mem_total = $1;
- }
- }
-
- close MEMINFO;
-
-# TODO: Figure out how to determine the amount of memory when running in Docker
-# because we're wondering if Dataverse can run in the free OpenShift Online
-# offering that only gives you 1 GB of memory. Obviously, if this is someone's
- # first impression of Dataverse, we want it to run well! What if you try to
-# ingest a large file or perform other memory-intensive operations? For more
-# context, see https://github.com/IQSS/dataverse/issues/4040#issuecomment-331282286
- if ( -e "/sys/fs/cgroup/memory/memory.limit_in_bytes" && open CGROUPMEM, "/sys/fs/cgroup/memory/memory.limit_in_bytes" ) {
- print "INFO: This system has the CGROUP file /sys/fs/cgroup/memory/memory.limit_in_bytes\n";
- while ( my $limitline = <CGROUPMEM> ) {
- ### TODO: NO, WE ARE NOT NECESSARILY IN DOCKER!
- ###print "We must be running in Docker! Fancy!\n";
- # The goal of this cgroup check is for
- # "Setting the heap limit for Glassfish/Payara to 750MB"
- # to change to some other value, based on memory available.
- print "INFO: /sys/fs/cgroup/memory/memory.limit_in_bytes: $limitline\n";
- my $limit_in_kb = $limitline / 1024;
- print "INFO: CGROUP limit_in_kb = $limit_in_kb [ignoring]\n";
- # In openshift.json, notice how PostgreSQL and Solr have
- # resources.limits.memory set to "256Mi".
- # If you try to give the Dataverse/Glassfish container twice
- # as much memory (512 MB) and allow $sys_mem_total to
- # be set below, you should see the following:
- # "Setting the heap limit for Glassfish to 192MB."
- # FIXME: dataverse.war will not deploy with only 512 MB of memory.
- # Again, the goal is 1 GB total (512MB + 256MB + 256MB) for
- # Glassfish, PostgreSQL, and Solr to fit in the free OpenShift tier.
- #print "setting sys_mem_total to: $limit_in_kb\n";
- #$sys_mem_total = $limit_in_kb;
- }
- close CGROUPMEM;
- }
- }
- elsif ( -x "/usr/sbin/sysctl" )
- {
- # MacOS X, probably...
-
- $sys_mem_total = `/usr/sbin/sysctl -n hw.memsize`;
- chop $sys_mem_total;
- if ( $sys_mem_total > 0 ) {
- $sys_mem_total = int( $sys_mem_total / 1024 );
- # size in kb
- }
- }
-
- if ( $sys_mem_total > 0 ) {
- # setting the default heap size limit to 3/8 of the available
- # amount of memory:
- $gf_heap_default = ( int( $sys_mem_total / ( 8 / 3 * 1024 ) ) );
-
- print "\nSetting the heap limit for Payara5 to " . $gf_heap_default . "MB. \n";
- print "You may need to adjust this setting to better suit \n";
- print "your system.\n\n";
-
- #$gf_heap_default .= "m";
-
- }
- else
- {
- print "\nCould not determine the amount of memory on your system.\n";
- print "Setting the heap limit for Payara5 to 2GB. You may need \n";
- print "to adjust the value to better suit your system.\n\n";
- }
-
- push @CONFIG_VARIABLES, "DEF_MEM_SIZE";
- $CONFIG_DEFAULTS{"DEF_MEM_SIZE"} = $gf_heap_default;
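The odd-looking divisor is unit conversion folded into the ratio: dividing kilobytes by (8/3 * 1024) yields 3/8 of total memory, in megabytes. A worked Python equivalent, with the arithmetic spelled out:

```python
def default_heap_mb(total_mem_kb: int) -> int:
    """3/8 of physical memory, in MB: kB / (8/3 * 1024) == kB * 3 / (8 * 1024)."""
    return int(total_mem_kb / (8 / 3 * 1024))

assert default_heap_mb(16 * 1024 * 1024) == 6144  # 16 GB machine -> 6 GB heap
assert default_heap_mb(4 * 1024 * 1024) == 1536   #  4 GB machine -> 1.5 GB heap
```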
-
-# TODO:
-# is the below still the case with Payara5?
-# if the system has more than 4GB of memory (I believe), glassfish must
-# be run with the 64 bit flag set explicitly (at least that was the case
-# with the MacOS glassfish build...). Verify, and if still the case,
-# add a check.
-
- print "\n*********************\n";
- print "PLEASE NOTE, SOME OF THE ASADMIN COMMANDS ARE GOING TO FAIL,\n";
- print "FOR EXAMPLE, IF A CONFIGURATION SETTING THAT WE ARE TRYING\n";
- print "TO CREATE ALREADY EXISTS; OR IF A JVM OPTION THAT WE ARE\n";
- print "DELETING DOESN'T. THESE \"FAILURES\" ARE NORMAL!\n";
- print "*********************\n\n";
- print "When/if asadmin asks you to \"Enter admin user name\",\n";
- print "it should be safe to hit return and accept the default\n";
- print "(which is \"admin\").\n";
-
- print "\nPress any key to continue...\n\n";
-
- unless ($noninteractive)
- {
- system "stty cbreak /dev/tty 2>&1";
- unless ($noninteractive) {
- my $key = getc(STDIN);
- }
- system "stty -cbreak /dev/tty 2>&1";
- }
-
- print "\n";
-
-# 5b. start domain, if not running:
-
- my $javacheck = `java -version`;
- my $exitcode = $?;
- unless ( $exitcode == 0 ) {
- print STDERR "$javacheck\n" if $javacheck;
- print STDERR "Do you have java installed?\n";
- exit 1;
- }
- my $DOMAIN = "domain1";
- my $DOMAIN_DOWN =
- `$CONFIG_DEFAULTS{'GLASSFISH_DIRECTORY'}/bin/asadmin list-domains | grep "$DOMAIN " | grep "not running"`;
- print STDERR $DOMAIN_DOWN . "\n";
- if ($DOMAIN_DOWN) {
- print "Trying to start domain up...\n";
- if ( $current_user eq $CONFIG_DEFAULTS{'GLASSFISH_USER'} ){
- system( $CONFIG_DEFAULTS{'GLASSFISH_DIRECTORY'} . "/bin/asadmin start-domain domain1" );
- }
- else
- {
- system( "sudo -u $CONFIG_DEFAULTS{'GLASSFISH_USER'} " . $CONFIG_DEFAULTS{'GLASSFISH_DIRECTORY'} . "/bin/asadmin start-domain domain1" );
- }
- # TODO: (?) - retest that the domain is running now?
- }
- else
- {
- print "domain appears to be up...\n";
- }
-
-# 5c. create asadmin login, so that the user doesn't have to enter
-# the username and password for every asadmin command, if
-# access to :4848 is password-protected:
-
- system( $glassfish_dir. "/bin/asadmin login" );
-
-# 5d. configure glassfish using ASADMIN commands:
-
- $success = &run_asadmin_script();
-
-# CHECK EXIT STATUS, BARF IF SETUP SCRIPT FAILED:
-
- unless ($success) {
- print "\nERROR! Failed to configure Payara5 domain!\n";
- print "(see the error messages above - if any)\n";
- print "Aborting...\n";
-
- exit 1;
- }
-
-# 5e. Additional config files:
-
- my $JHOVE_CONFIG = "jhove.conf";
- my $JHOVE_CONF_SCHEMA = "jhoveConfig.xsd";
-
-
- my $JHOVE_CONFIG_DIST = $JHOVE_CONFIG;
- my $JHOVE_CONF_SCHEMA_DIST = $JHOVE_CONF_SCHEMA;
-
-# (if the installer is being run NOT as part of a distribution zipped bundle, but
-# from inside the source tree - adjust the locations of the jhove config files:
-
- unless ( -f $JHOVE_CONFIG ) {
- $JHOVE_CONFIG_DIST = "../../conf/jhove/jhove.conf";
- $JHOVE_CONF_SCHEMA_DIST = "../../conf/jhove/jhoveConfig.xsd";
- }
-
-# but if we can't find the files in either location, it must mean
-# that they are not running the script in the correct directory - so
-# nothing else left for us to do but give up:
-
- unless ( -f $JHOVE_CONFIG_DIST && -f $JHOVE_CONF_SCHEMA_DIST ) {
- print "\nERROR! JHOVE configuration files not found in the config dir!\n";
- print "(are you running the installer in the right directory?\n";
- print "Aborting...\n";
- exit 1;
- }
-
- print "\nCopying additional configuration files... ";
-
- #system( "/bin/cp -f " . $JHOVE_CONF_SCHEMA_DIST . " " . $glassfish_dir . "/glassfish/domains/domain1/config" );
- my $jhove_success = copy ($JHOVE_CONF_SCHEMA_DIST, $glassfish_dir . "/glassfish/domains/domain1/config");
- unless ($jhove_success)
- {
- print "\n*********************\n";
- print "ERROR: failed to copy jhove config file into " . $glassfish_dir . "/glassfish/domains/domain1/config - do you have write permission in that directory?";
- exit 1;
- }
-
-# The JHOVE conf file has an absolute PATH of the JHOVE config schema file (uh, yeah...)
-# - so it may need to be readjusted here:
-
- if ( $glassfish_dir ne "/usr/local/payara5" )
- {
- system( "sed 's:/usr/local/payara5:$glassfish_dir:g' < " . $JHOVE_CONFIG_DIST . " > " . $glassfish_dir . "/glassfish/domains/domain1/config/" . $JHOVE_CONFIG);
- }
- else
- {
- system( "/bin/cp -f " . $JHOVE_CONFIG_DIST . " " . $glassfish_dir . "/glassfish/domains/domain1/config" );
- }
-
- print "done!\n";
-
-# 5f. check if payara is running:
-# TODO.
-
-# 5g. DEPLOY THE APPLICATION:
-
- print "\nAttempting to deploy the application.\n";
- print "Command line: " . $glassfish_dir . "/bin/asadmin deploy " . $WARFILE_LOCATION . "\n";
- unless ((
- my $exit_code =
- system( $glassfish_dir . "/bin/asadmin deploy " . $WARFILE_LOCATION )
- ) == 0 )
- {
- print STDERR "Failed to deploy the application! WAR file: " . $WARFILE_LOCATION . ".\n";
- print STDERR "(exit code: " . $exit_code . ")\n";
- print STDERR "Aborting.\n";
- exit 1;
- }
-
-
- print "Finished configuring Payara and deploying the dataverse application. \n";
-
-
- return $success;
-}
-
-sub run_asadmin_script {
- my $success = 1;
- my $failure = 0;
-
- # We are going to run a standalone shell script with a bunch of asadmin
- # commands to set up all the Payara components for the application.
- # All the parameters must be passed to that script as environmental
- # variables:
-
- $ENV{'GLASSFISH_ROOT'} = $CONFIG_DEFAULTS{'GLASSFISH_DIRECTORY'};
- $ENV{'GLASSFISH_DOMAIN'} = "domain1";
- $ENV{'ASADMIN_OPTS'} = "";
- $ENV{'MEM_HEAP_SIZE'} = $CONFIG_DEFAULTS{'DEF_MEM_SIZE'};
-
- $ENV{'DB_PORT'} = $CONFIG_DEFAULTS{'POSTGRES_PORT'};
- $ENV{'DB_HOST'} = $CONFIG_DEFAULTS{'POSTGRES_SERVER'};
- $ENV{'DB_NAME'} = $CONFIG_DEFAULTS{'POSTGRES_DATABASE'};
- $ENV{'DB_USER'} = $CONFIG_DEFAULTS{'POSTGRES_USER'};
- $ENV{'DB_PASS'} = $CONFIG_DEFAULTS{'POSTGRES_PASSWORD'};
-
- $ENV{'RSERVE_HOST'} = $CONFIG_DEFAULTS{'RSERVE_HOST'};
- $ENV{'RSERVE_PORT'} = $CONFIG_DEFAULTS{'RSERVE_PORT'};
- $ENV{'RSERVE_USER'} = $CONFIG_DEFAULTS{'RSERVE_USER'};
- $ENV{'RSERVE_PASS'} = $CONFIG_DEFAULTS{'RSERVE_PASSWORD'};
- $ENV{'DOI_BASEURL'} = $CONFIG_DEFAULTS{'DOI_BASEURL'};
- $ENV{'DOI_USERNAME'} = $CONFIG_DEFAULTS{'DOI_USERNAME'};
- $ENV{'DOI_PASSWORD'} = $CONFIG_DEFAULTS{'DOI_PASSWORD'};
- $ENV{'DOI_DATACITERESTAPIURL'} = $CONFIG_DEFAULTS{'DOI_DATACITERESTAPIURL'};
-
- $ENV{'HOST_ADDRESS'} = $CONFIG_DEFAULTS{'HOST_DNS_ADDRESS'};
-
- my ($mail_server_host, $mail_server_port) = split (":", $CONFIG_DEFAULTS{'MAIL_SERVER'});
-
- $ENV{'SMTP_SERVER'} = $mail_server_host;
-
- if ($mail_server_port)
- {
- $ENV{'SMTP_SERVER_PORT'} = $mail_server_port;
- }
-
- $ENV{'FILES_DIR'} =
- $CONFIG_DEFAULTS{'GLASSFISH_DIRECTORY'} . "/glassfish/domains/" . $ENV{'GLASSFISH_DOMAIN'} . "/files";
-
- system("./as-setup.sh");
-
- if ($?) {
- return $failure;
- }
- return $success;
-}
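The contract with as-setup.sh is worth calling out: every parameter travels through the environment, and the only feedback is the exit status. A condensed Python sketch of the same pattern (key subset only; names hypothetical):

```python
import os
import subprocess

def run_asadmin_script(config: dict) -> bool:
    """Export the config into the child's environment and report success."""
    env = dict(os.environ,
               GLASSFISH_ROOT=config["GLASSFISH_DIRECTORY"],
               GLASSFISH_DOMAIN="domain1",
               DB_HOST=config["POSTGRES_SERVER"],
               DB_NAME=config["POSTGRES_DATABASE"],
               DB_USER=config["POSTGRES_USER"],
               DB_PASS=config["POSTGRES_PASSWORD"])
    return subprocess.run(["./as-setup.sh"], env=env).returncode == 0
```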
-
-sub create_pg_hash {
- my $pg_username = shift @_;
- my $pg_password = shift @_;
-
- my $encode_line = $pg_password . $pg_username;
-
- # for Redhat:
-
- ##print STDERR "executing /bin/echo -n $encode_line | md5sum\n";
-
- my $hash;
- if ( $WORKING_OS eq "MacOSX" ) {
- $hash = `/bin/echo -n $encode_line | md5`;
- }
- else {
- $hash = `/bin/echo -n $encode_line | md5sum`;
- }
-
- chop $hash;
-
- $hash =~ s/ \-$//;
-
- if ( ( length($hash) != 32 ) || ( $hash !~ /^[0-9a-f]*$/ ) ) {
- print STDERR "Failed to generate a MD5-encrypted password hash for the Postgres database.\n";
- exit 1;
- }
-
- return $hash;
-}
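What create_pg_hash produces is PostgreSQL's legacy md5 password format: the hex digest of the password concatenated with the username, later prefixed with the literal "md5" in the CREATE ROLE statement. In Python, for reference:

```python
import hashlib

def pg_md5_password(username: str, password: str) -> str:
    """PostgreSQL md5 auth format: "md5" + md5(password + username) in hex,
    suitable for CREATE ROLE ... PASSWORD 'md5...'."""
    return "md5" + hashlib.md5((password + username).encode()).hexdigest()
```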
-
-sub validate_smtp_server {
- my ( $mail_server_iaddr, $mail_server_paddr, $mail_server_proto, $mail_server_status );
-
- $mail_server_status = 1;
-
- my $userentry = $CONFIG_DEFAULTS{'MAIL_SERVER'};
- my ($testserver, $testport) = split (":", $userentry);
-
- unless ( $mail_server_iaddr = inet_aton( $testserver ) ) {
- print STDERR "Could not look up $testserver,\n";
- print STDERR "the host you specified as your mail server\n";
- $mail_server_status = 0;
- }
-
- if ($mail_server_status) {
- $testport = 25 unless $testport;
- my $mail_server_paddr = sockaddr_in( $testport, $mail_server_iaddr );
- $mail_server_proto = getprotobyname('tcp');
-
- unless ( socket( SOCK, PF_INET, SOCK_STREAM, $mail_server_proto )
- && connect( SOCK, $mail_server_paddr ) )
- {
- print STDERR "Could not establish connection to $CONFIG_DEFAULTS{'MAIL_SERVER'},\n";
- print STDERR "the address you provided for your Mail server.\n";
- print STDERR "Please select a valid mail server, and try again.\n\n";
-
- $mail_server_status = 0;
- }
-
- close(SOCK);
- }
-
- return $mail_server_status;
-}
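Note that this "validation" is only a TCP connect test: split host:port, default the port to 25, and see whether a socket opens. Roughly, in Python (hypothetical helper; a successful connect does not prove mail will actually be delivered):

```python
import socket

def smtp_reachable(server: str, default_port: int = 25,
                   timeout: float = 10.0) -> bool:
    """Accept "host" or "host:port" and test whether a TCP connection opens."""
    host, _, port = server.partition(":")
    try:
        with socket.create_connection((host, int(port or default_port)),
                                      timeout=timeout):
            return True
    except OSError:
        return False
```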
-
-# support function for set_root_contact_email
-sub search_replace_file
-{
- my ($infile, $pattern, $replacement, $outfile) = @_;
- open (my $inp, $infile);
- local $/ = undef;
- my $txt = <$inp>;
- close $inp;
- $txt =~s/$pattern/$replacement/g;
- open (my $opf, '>:encoding(UTF-8)', $outfile);
- print $opf $txt;
- close $opf;
- return;
-}
-# set the email address for the default `dataverseAdmin` account
-sub set_root_contact_email
-{
- my ($contact_email) = @_;
- my $config_json = "data/user-admin.json";
- search_replace_file($config_json,"\"email\":\"dataverse\@mailinator.com\"","\"email\":\"$contact_email\"",$config_json);
- return;
-}
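The same substitution in Python, for reference; the placeholder address below is the one shipped in the distribution's user-admin.json:

```python
def set_root_contact_email(contact_email: str,
                           config_json: str = "data/user-admin.json") -> None:
    """Swap the placeholder admin address for the real one, in place."""
    with open(config_json, encoding="utf-8") as f:
        text = f.read()
    text = text.replace('"email":"dataverse@mailinator.com"',
                        f'"email":"{contact_email}"')
    with open(config_json, "w", encoding="utf-8") as f:
        f.write(text)
```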
-
-
-sub setup_postgres {
- my $pg_local_connection = 0;
- my $pg_major_version = 0;
- my $pg_minor_version = 0;
-
-
-# We'll need to do a few things as the Postgres admin user;
-# We'll assume the name of the admin user is "postgres".
- my $POSTGRES_ADMIN_USER = "postgres";
-
-
-
-##Handling container env
-
- if ($pod_name eq "start-glassfish")
- {
- # When we are in this openshift "start-glassfish" pod, we get all the
- # Postgres configuration from the environmental variables.
- print "Init container starting \n";
- $CONFIG_DEFAULTS{'POSTGRES_SERVER'} = $ENV{"POSTGRES_SERVER"} . "." . $ENV{"POSTGRES_SERVICE_HOST"};
- $CONFIG_DEFAULTS{'POSTGRES_DATABASE'} = $ENV{"POSTGRES_DATABASE"};
- $CONFIG_DEFAULTS{'POSTGRES_USER'} = $ENV{"POSTGRES_USER"};
- $CONFIG_DEFAULTS{'POSTGRES_ADMIN_PASSWORD'} = $ENV{"POSTGRES_ADMIN_PASSWORD"};
- # there was a weird case of the postgres admin password option spelled differently in openshift.json
- # - as "POSTGRESQL_ADMIN_PASSWORD"; I'm going to change it in openshift.json - but I'm leaving this
- # next line here, just in case: (L.A. -- Sept. 2018)
- $CONFIG_DEFAULTS{'POSTGRES_ADMIN_PASSWORD'} = $ENV{'POSTGRESQL_ADMIN_PASSWORD'};
- $CONFIG_DEFAULTS{'POSTGRES_PASSWORD'} = $ENV{"POSTGRES_PASSWORD"};
- }
-
- if ( $CONFIG_DEFAULTS{'POSTGRES_SERVER'} eq 'localhost' || $CONFIG_DEFAULTS{'POSTGRES_SERVER'} eq '127.0.0.1' )
- {
- $pg_local_connection = 1;
- }
-# elsif ($postgresonly)
-# {
-# print "In the --pg_only mode the script can only be run LOCALLY,\n";
-# print "i.e., on the server where PostgresQL is running, with the\n";
-# print "Postgres server address as localhost - \"127.0.0.1\".\n";
-# exit 1;
-# }
-
-#If it is executing in a container, proceed easy with this all-in-one block
-
-
-
-
-# 3b. LOCATE THE psql EXECUTABLE:
-
- if ( $pod_name eq "start-glassfish"){
- $psql_exec_path = "/usr/bin"
- }
- else
- {
- my $sys_path = $ENV{'PATH'};
- my @sys_path_dirs = split( ":", $sys_path );
-
- for my $sys_path_dir (@sys_path_dirs) {
-
- if ( -x $sys_path_dir . "/psql" ) {
- $psql_exec_path = $sys_path_dir;
-
- last;
- }
- }
- }
-
- my $psql_major_version = 0;
- my $psql_minor_version = 0;
-
-# 3c. IF PSQL WAS FOUND IN THE PATH, CHECK ITS VERSION:
-
- unless ( $psql_exec_path eq "" ) {
- open( PSQLOUT, $psql_exec_path . "/psql --version|" );
-
- my $psql_version_line = <PSQLOUT>;
- chop $psql_version_line;
- close PSQLOUT;
-
- my ( $postgresName, $postgresNameLong, $postgresVersion ) = split( " ", $psql_version_line );
-
- unless ( $postgresName eq "psql" && $postgresVersion =~ /^[0-9][0-9\.]*$/ ) {
- print STDERR "\nWARNING: Unexpected output from psql command!\n";
- }
- else
- {
- my (@psql_version_tokens) = split( '\.', $postgresVersion );
-
- print "\n\nFound Postgres psql command, version $postgresVersion.\n\n";
-
- $psql_major_version = $psql_version_tokens[0];
- $psql_minor_version = $psql_version_tokens[1];
-
- $pg_major_version = $psql_major_version;
- $pg_minor_version = $psql_minor_version;
-
- }
- }
-
-# a frequent problem with MacOSX is that the copy of psql found in the PATH
-# belongs to the older version of PostgreSQL supplied with the OS, which happens
-# to be incompatible with the newer builds from the Postgres project, which are
-# recommended to be used with Dataverse. So if this is a MacOSX box, we'll
-# check what other versions of PG are available, and select the highest version
-# we can find:
-
- if ( $WORKING_OS eq "MacOSX" ) {
- my $macos_pg_major_version = 0;
- my $macos_pg_minor_version = 0;
-
- for $macos_pg_minor_version ( "9", "8", "7", "6", "5", "4", "3", "2", "1", "0" ) {
- if ( -x "/Library/PostgreSQL/9." . $macos_pg_minor_version . "/bin/psql" ) {
- $macos_pg_major_version = 9;
- if ( ( $macos_pg_major_version > $psql_major_version )
- || ( $macos_pg_minor_version >= $psql_minor_version ) )
- {
- $psql_exec_path = "/Library/PostgreSQL/9." . $macos_pg_minor_version . "/bin";
- $pg_major_version = $macos_pg_major_version;
- $pg_minor_version = $macos_pg_minor_version;
- }
- last;
- }
- }
- }
-
- my $psql_admin_exec = "";
-
- if ( $psql_exec_path eq "" )
- {
- if ( $pg_local_connection || $noninteractive)
- {
- print STDERR "\nERROR: I haven't been able to find the psql command in your PATH!\n";
- print STDERR "Please make sure PostgresQL is properly installed; if necessary, add\n";
- print STDERR "the location of psql to the PATH, then try again.\n\n";
-
- exit 1;
- }
- else
- {
- print "WARNING: I haven't been able to find the psql command in your PATH!\n";
- print "But since we are configuring a Dataverse instance to use a remote Postgres server,\n";
- print "we can still set up the database by running a setup script on that remote server\n";
- print "(see below for instructions).\n";
-
- }
- } else {
-
- print "(Using psql version " . $pg_major_version . "." . $pg_minor_version . ": " . $psql_exec_path . "/psql)\n";
-
-
- $psql_admin_exec = "PGPASSWORD=" . $CONFIG_DEFAULTS{'POSTGRES_ADMIN_PASSWORD'} . "; export PGPASSWORD; " . $psql_exec_path;
- $psql_exec_path = "PGPASSWORD=" . $CONFIG_DEFAULTS{'POSTGRES_PASSWORD'} . "; export PGPASSWORD; " . $psql_exec_path;
-
- print "Checking if we can talk to Postgres as the admin user...\n";
- }
-
-# 3d. CHECK IF WE CAN TALK TO POSTGRES AS THE ADMIN:
-
- if ($psql_exec_path eq "" || system( $psql_admin_exec . "/psql -h " . $CONFIG_DEFAULTS{'POSTGRES_SERVER'} . " -p " . $CONFIG_DEFAULTS{'POSTGRES_PORT'} . " -U " . $POSTGRES_ADMIN_USER . " -d postgres -c 'SELECT * FROM pg_roles' > /dev/null 2>&1" ) )
- {
- # No, we can't. :(
- if ($pg_local_connection || $noninteractive)
- {
- # If Postgres is running locally, this is a fatal condition.
- # We'll give them some (potentially) helpful pointers and exit.
-
- print "(Tried executing: " . $psql_admin_exec . "/psql -h " . $CONFIG_DEFAULTS{'POSTGRES_SERVER'} . " -p " . $CONFIG_DEFAULTS{'POSTGRES_PORT'} . " -U " . $POSTGRES_ADMIN_USER . " -d postgres -c 'SELECT * FROM pg_roles' > /dev/null 2>&1) \n";
- print "Nope, I haven't been able to connect to the local instance of PostgresQL as the admin user.\n";
- print "\nIs postgresql running? \n";
- print " On a RedHat-like system, you can check the status of the daemon with\n\n";
- print " service postgresql start\n\n";
- print " On MacOSX, use Applications -> PostgresQL -> Start Server.\n";
- print " (or, if there's no \"Start Server\" item in your PostgresQL folder, \n";
- print " simply restart your MacOSX system!)\n";
- print "\nAlso, please make sure that the daemon is listening to network connections!\n";
- print " - at least on the localhost interface. (See \"Installing Postgres\" section\n";
- print " of the installation manual).\n";
- print "\nFinally, did you supply the correct admin password?\n";
- print " Don't know the admin password for your Postgres installation?\n";
- print " - then simply set the access level to \"trust\" temporarily (for localhost only!)\n";
- print " in your pg_hba.conf file. Again, please consult the \n";
- print " installation manual).\n";
- exit 1;
- }
- else
- {
- # If we are configuring the Dataverse instance to use a Postgres server
- # running on a remote host, it is possible to configure the database
- # without opening remote access for the admin user. They will simply
- # have to run this script in the "postgres-only" mode on that server, locally,
- # then resume the installation here:
- print "(Tried executing: " . $psql_admin_exec . "/psql -h " . $CONFIG_DEFAULTS{'POSTGRES_SERVER'} . " -p " . $CONFIG_DEFAULTS{'POSTGRES_PORT'} . " -U " . $POSTGRES_ADMIN_USER . " -d postgres -c 'SELECT * FROM pg_roles' > /dev/null 2>&1)\n\n";
- print "Haven't been able to connect to the remote Postgres server as the admin user.\n";
- print "(Or you simply don't have psql installed on this server)\n";
- print "It IS possible to configure a database for your Dataverse on a remote server,\n";
- print "without having admin access to that remote Postgres installation.\n\n";
- print "In order to do that, please copy the installer (the entire package) to the server\n";
- print "where PostgresQL is running and run the installer with the \"--pg_only\" option:\n\n";
- print " ./install --pg_only\n\n";
-
- print "Press any key to continue the installation process once that has been\n";
- print "done. Or press ctrl-C to exit the installer.\n\n";
-
- system "stty cbreak /dev/tty 2>&1";
- my $key = getc(STDIN);
- system "stty -cbreak /dev/tty 2>&1";
- print "\n";
- }
- }
- else
- {
- print "Yes, we can!\n";
-
- # ok, we can proceed with configuring things...
-
- print "\nConfiguring Postgres Database:\n";
-
- # 4c. CHECK IF THIS DB ALREADY EXISTS:
-
- my $psql_command_dbcheck =
- $psql_admin_exec . "/psql -h " . $CONFIG_DEFAULTS{'POSTGRES_SERVER'} . " -p " . $CONFIG_DEFAULTS{'POSTGRES_PORT'} . " -U " . $POSTGRES_ADMIN_USER . " -c '' -d " . $CONFIG_DEFAULTS{'POSTGRES_DATABASE'} . ">/dev/null 2>&1";
-
- if ( ( my $exitcode = system($psql_command_dbcheck) ) == 0 )
- {
- if ($force)
- {
- print "WARNING! Database "
- . $CONFIG_DEFAULTS{'POSTGRES_DATABASE'}
- . " already exists but --force given... continuing.\n";
- }
- else
- {
- print "WARNING! Database " . $CONFIG_DEFAULTS{'POSTGRES_DATABASE'} . " already exists!\n";
-
- if ($noninteractive)
- {
- exit 1;
- }
- else
- {
- print "\nPress any key to continue, or ctrl-C to exit the installer...\n\n";
-
- system "stty cbreak /dev/tty 2>&1";
- my $key = getc(STDIN);
- system "stty -cbreak /dev/tty 2>&1";
- print "\n";
-
- }
- }
- }
-
- # 3e. CHECK IF THIS USER ALREADY EXISTS:
-
- my $psql_command_rolecheck =
- $psql_exec_path . "/psql -h " . $CONFIG_DEFAULTS{'POSTGRES_SERVER'} . " -c '' -d postgres " . $CONFIG_DEFAULTS{'POSTGRES_USER'} . " >/dev/null 2>&1";
- my $exitcode;
-
- if ( ( $exitcode = system($psql_command_rolecheck) ) == 0 )
- {
- print "User (role) " . $CONFIG_DEFAULTS{'POSTGRES_USER'} . " already exists;\n";
- print "Proceeding.";
- }
- else
- {
- # 3f. CREATE DVN DB USER:
-
- print "\nCreating Postgres user (role) for the DVN:\n";
-
- open TMPCMD, ">/tmp/pgcmd.$$.tmp";
-
- # with md5-encrypted password:
- my $pg_password_md5 =
- &create_pg_hash( $CONFIG_DEFAULTS{'POSTGRES_USER'}, $CONFIG_DEFAULTS{'POSTGRES_PASSWORD'} );
- my $sql_command =
- "CREATE ROLE \""
- . $CONFIG_DEFAULTS{'POSTGRES_USER'}
- . "\" PASSWORD 'md5"
- . $pg_password_md5
- . "' NOSUPERUSER CREATEDB CREATEROLE INHERIT LOGIN";
-
- print TMPCMD $sql_command;
- close TMPCMD;
-
- my $psql_commandline = $psql_admin_exec . "/psql -h " . $CONFIG_DEFAULTS{'POSTGRES_SERVER'} . " -p " . $CONFIG_DEFAULTS{'POSTGRES_PORT'} . " -U " . $POSTGRES_ADMIN_USER . " -d postgres -f /tmp/pgcmd.$$.tmp >/dev/null 2>&1";
-
- my $out = qx($psql_commandline 2>&1);
- $exitcode = $?;
- unless ( $exitcode == 0 )
- {
- print STDERR "Could not create the DVN Postgres user role!\n";
- print STDERR "(SQL: " . $sql_command . ")\n";
- print STDERR "(psql exit code: " . $exitcode . ")\n";
- print STDERR "(STDERR and STDOUT was: " . $out . ")\n";
- exit 1;
- }
-
- unlink "/tmp/pgcmd.$$.tmp";
- print "done.\n";
- }
-
- # 3g. CREATE DVN DB:
-
- print "\nCreating Postgres database:\n";
-
- my $psql_command =
- $psql_admin_exec
- . "/createdb -h " . $CONFIG_DEFAULTS{'POSTGRES_SERVER'} . " -p " . $CONFIG_DEFAULTS{'POSTGRES_PORT'} . " -U " . $POSTGRES_ADMIN_USER ." "
- . $CONFIG_DEFAULTS{'POSTGRES_DATABASE'} . " --owner="
- . $CONFIG_DEFAULTS{'POSTGRES_USER'};
-
- my $out = qx($psql_command 2>&1);
- $exitcode = $?;
- unless ( $exitcode == 0 )
- {
- print STDERR "Could not create Postgres database for the Dataverse app!\n";
- print STDERR "(command: " . $psql_command . ")\n";
- print STDERR "(psql exit code: " . $exitcode . ")\n";
- print STDERR "(STDOUT and STDERR: " . $out . ")\n";
- if ($force)
- {
- print STDERR "\ncalled with --force, continuing\n";
- }
- else
- {
- print STDERR "\naborting the installation (sorry!)\n\n";
- exit 1;
- }
- }
- }
-
-# Whether the user and the database were created locally or remotely, we'll now
-# verify that we can talk to that database, with the credentials of the database
-# user that we want the Dataverse application to be using:
-
- if ( $psql_exec_path ne "" && system( $psql_exec_path . "/psql -h " . $CONFIG_DEFAULTS{'POSTGRES_SERVER'} . " -p " . $CONFIG_DEFAULTS{'POSTGRES_PORT'} . " -U " . $CONFIG_DEFAULTS{'POSTGRES_USER'} . " -d " . $CONFIG_DEFAULTS{'POSTGRES_DATABASE'} . " -c 'SELECT * FROM pg_roles' > /dev/null 2>&1" ) )
- {
- print STDERR "Oops, haven't been able to connect to the database " . $CONFIG_DEFAULTS{'POSTGRES_DATABASE'} . ",\n";
- print STDERR "running on " . $CONFIG_DEFAULTS{'POSTGRES_SERVER'} . ", as user " . $CONFIG_DEFAULTS{'POSTGRES_USER'} . ".\n\n";
- print STDERR "Aborting the installation (sorry!)\n";
- exit 1;
- }
-}
-
-sub read_config_defaults {
- my $config_file = shift @_;
-
- unless ( -f $config_file )
- {
- print STDERR "Can't find the config file " . $config_file . "!\n";
- exit 1;
- }
-
- open CF, $config_file or die "Can't open config file " . $config_file . " for reading.\n";
-
- while (<CF>)
- {
- chop;
-
- if ( $_ =~/^[A-Z]/ && $_ =~/ *= */ )
- {
- my ($name, $value) = split(/ *= */, $_, 2);
- $CONFIG_DEFAULTS{$name} = $value;
- }
- }
- close CF;
-}
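The defaults file is a flat NAME = VALUE format where only lines beginning with an uppercase letter count. A minimal Python reimplementation (a sketch, not the installer's own parser):

```python
def read_config_defaults(path: str) -> dict:
    """Collect NAME = VALUE pairs, splitting on the first '='."""
    defaults = {}
    with open(path) as f:
        for line in f:
            line = line.rstrip("\n")
            if line[:1].isupper() and "=" in line:
                name, value = (s.strip() for s in line.split("=", 1))
                defaults[name] = value
    return defaults
```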
-
-sub read_interactive_config_values {
- my $config_file = shift @_;
-
- unless ( -f $config_file )
- {
- print STDERR "Can't find the config file " . $config_file . "!\n";
- exit 1;
- }
-
- open CF, $config_file or die "Can't open config file " . $config_file . " for reading.\n";
-
- my $mode = "";
-
- while (<CF>)
- {
- chop;
-
- if ( $_ eq "[prompts]" || $_ eq "[comments]" )
- {
- $mode = $_;
- }
-
- if ( $_ =~/^[A-Z]/ && $_ =~/ *= */ )
- {
- my ($name, $value) = split(/ *= */, $_, 2);
-
- if ( $mode eq "[prompts]" )
- {
- $CONFIG_PROMPTS{$name} = $value;
- }
- elsif ( $mode eq "[comments]" )
- {
- $value =~s/\\n/\n/g;
- $CONFIG_COMMENTS{$name} = $value;
- }
- }
- }
- close CF;
-}
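read_interactive_config_values layers two bracketed sections over that same format, expanding literal "\n" sequences in the comments. A matching Python sketch:

```python
def read_interactive_config(path: str):
    """Return (prompts, comments) keyed by setting name."""
    prompts, comments, mode = {}, {}, ""
    with open(path) as f:
        for line in f:
            line = line.rstrip("\n")
            if line in ("[prompts]", "[comments]"):
                mode = line
            elif line[:1].isupper() and "=" in line:
                name, value = (s.strip() for s in line.split("=", 1))
                if mode == "[prompts]":
                    prompts[name] = value
                elif mode == "[comments]":
                    comments[name] = value.replace("\\n", "\n")
    return prompts, comments
```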
diff --git a/scripts/installer/install.py b/scripts/installer/install.py
index 5acb4d760a4..a7f11f352ed 100644
--- a/scripts/installer/install.py
+++ b/scripts/installer/install.py
@@ -314,7 +314,7 @@
gfDir = config.get('glassfish', 'GLASSFISH_DIRECTORY')
while not test_appserver_directory(gfDir):
print("\nInvalid Payara directory!")
- gfDir = read_user_input("Enter the root directory of your Payara5 installation:\n(Or ctrl-C to exit the installer): ")
+ gfDir = read_user_input("Enter the root directory of your Payara installation:\n(Or ctrl-C to exit the installer): ")
config.set('glassfish', 'GLASSFISH_DIRECTORY', gfDir)
elif option == "mail_server":
mailServer = config.get('system', 'MAIL_SERVER')
@@ -511,12 +511,12 @@
try:
copy2(jhoveConfigSchemaDist, gfConfigDir)
# The JHOVE conf file has an absolute PATH of the JHOVE config schema file (uh, yeah...)
- # and may need to be adjusted, if Payara is installed anywhere other than /usr/local/payara5:
- if gfDir == "/usr/local/payara5":
+ # and may need to be adjusted, if Payara is installed anywhere other than /usr/local/payara6:
+ if gfDir == "/usr/local/payara6":
copy2(jhoveConfigDist, gfConfigDir)
else:
- # use sed to replace /usr/local/payara5 in the distribution copy with the real gfDir:
- sedCommand = "sed 's:/usr/local/payara5:"+gfDir+":g' < " + jhoveConfigDist + " > " + gfConfigDir + "/" + jhoveConfig
+ # use sed to replace /usr/local/payara6 in the distribution copy with the real gfDir:
+ sedCommand = "sed 's:/usr/local/payara6:"+gfDir+":g' < " + jhoveConfigDist + " > " + gfConfigDir + "/" + jhoveConfig
subprocess.call(sedCommand, shell=True)
print("done.")
diff --git a/scripts/installer/installAppServer.py b/scripts/installer/installAppServer.py
index 8b719ac09d1..698f5ba9a58 100644
--- a/scripts/installer/installAppServer.py
+++ b/scripts/installer/installAppServer.py
@@ -3,7 +3,7 @@
def runAsadminScript(config):
# We are going to run a standalone shell script with a bunch of asadmin
- # commands to set up all the app. server (payara5) components for the application.
+ # commands to set up all the app. server (payara6) components for the application.
# All the parameters must be passed to that script as environmental
# variables:
os.environ['GLASSFISH_DOMAIN'] = "domain1";
diff --git a/scripts/installer/installUtils.py b/scripts/installer/installUtils.py
index 7cc368de5f8..ff5e6eb708d 100644
--- a/scripts/installer/installUtils.py
+++ b/scripts/installer/installUtils.py
@@ -57,7 +57,7 @@ def test_appserver_directory(directory):
#print("version: major: "+str(major_version)+", minor: "+str(minor_version))
- if major_version != 5 or minor_version < 201:
+ if major_version != 6 or minor_version < 2023:
return False
return True
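Given Payara version strings such as "6.2023.7", the updated gate accepts major version 6 with a minor of 2023 or later. A self-contained sketch of the comparison (the real code derives the numbers from the installation directory rather than a string argument):

```python
def acceptable_payara(version: str) -> bool:
    """True for Payara 6 builds from the 2023 series onward."""
    major, minor = (int(tok) for tok in version.split(".")[:2])
    return major == 6 and minor >= 2023

assert acceptable_payara("6.2023.7")
assert not acceptable_payara("5.2022.3")
```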
diff --git a/scripts/installer/interactive.config b/scripts/installer/interactive.config
index 86ea926fe5d..ef8110c554f 100644
--- a/scripts/installer/interactive.config
+++ b/scripts/installer/interactive.config
@@ -26,7 +26,7 @@ DOI_BASEURL = Datacite URL
DOI_DATACITERESTAPIURL = Datacite REST API URL
[comments]
HOST_DNS_ADDRESS = :(enter numeric IP address, if FQDN is unavailable)
-GLASSFISH_USER = :This user will be running the App. Server (Payara5) service on your system.\n - If this is a dev. environment, this should be your own username; \n - In production, we suggest you create the account "dataverse", or use any other unprivileged user account\n:
+GLASSFISH_USER = :This user will be running the App. Server (Payara) service on your system.\n - If this is a dev. environment, this should be your own username; \n - In production, we suggest you create the account "dataverse", or use any other unprivileged user account\n:
GLASSFISH_DIRECTORY =
GLASSFISH_REQUEST_TIMEOUT = :\n Defaults to 1800 seconds (30 minutes)
ADMIN_EMAIL = :\n(please enter a valid email address!)
diff --git a/scripts/tests/ec2-memory-benchmark/ec2-memory-benchmark-remote.sh b/scripts/tests/ec2-memory-benchmark/ec2-memory-benchmark-remote.sh
index 0cfdd20c272..367aa214563 100755
--- a/scripts/tests/ec2-memory-benchmark/ec2-memory-benchmark-remote.sh
+++ b/scripts/tests/ec2-memory-benchmark/ec2-memory-benchmark-remote.sh
@@ -5,7 +5,7 @@ then
EC2_HTTP_LOCATION=""
fi
-DATAVERSE_APP_DIR=/usr/local/payara5/glassfish/domains/domain1/applications/dataverse; export DATAVERSE_APP_DIR
+DATAVERSE_APP_DIR=/usr/local/payara6/glassfish/domains/domain1/applications/dataverse; export DATAVERSE_APP_DIR
# restart app server
diff --git a/scripts/vagrant/setup.sh b/scripts/vagrant/setup.sh
index 0af4afb22af..b446e2dd5dc 100644
--- a/scripts/vagrant/setup.sh
+++ b/scripts/vagrant/setup.sh
@@ -51,7 +51,7 @@ SOLR_USER=solr
echo "Ensuring Unix user '$SOLR_USER' exists"
useradd $SOLR_USER || :
DOWNLOAD_DIR='/dataverse/downloads'
-PAYARA_ZIP="$DOWNLOAD_DIR/payara-5.2022.3.zip"
+PAYARA_ZIP="$DOWNLOAD_DIR/payara-6.2023.7.zip"
SOLR_TGZ="$DOWNLOAD_DIR/solr-8.11.1.tgz"
if [ ! -f $PAYARA_ZIP ] || [ ! -f $SOLR_TGZ ]; then
echo "Couldn't find $PAYARA_ZIP or $SOLR_TGZ! Running download script...."
@@ -59,13 +59,13 @@ if [ ! -f $PAYARA_ZIP ] || [ ! -f $SOLR_TGZ ]; then
echo "Done running download script."
fi
PAYARA_USER_HOME=~dataverse
-PAYARA_ROOT=/usr/local/payara5
+PAYARA_ROOT=/usr/local/payara6
if [ ! -d $PAYARA_ROOT ]; then
echo "Copying $PAYARA_ZIP to $PAYARA_USER_HOME and unzipping"
su $PAYARA_USER -s /bin/sh -c "cp $PAYARA_ZIP $PAYARA_USER_HOME"
su $PAYARA_USER -s /bin/sh -c "cd $PAYARA_USER_HOME && unzip -q $PAYARA_ZIP"
- # default.config defaults to /usr/local/payara5 so let's go with that
- rsync -a $PAYARA_USER_HOME/payara5/ $PAYARA_ROOT/
+ # default.config defaults to /usr/local/payara6 so let's go with that
+ rsync -a $PAYARA_USER_HOME/payara6/ $PAYARA_ROOT/
else
echo "$PAYARA_ROOT already exists"
fi
diff --git a/src/main/java/edu/harvard/iq/dataverse/AbstractGlobalIdServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/AbstractGlobalIdServiceBean.java
index 2a3f2d50364..6827ff33530 100644
--- a/src/main/java/edu/harvard/iq/dataverse/AbstractGlobalIdServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/AbstractGlobalIdServiceBean.java
@@ -3,8 +3,8 @@
import edu.harvard.iq.dataverse.settings.SettingsServiceBean;
import edu.harvard.iq.dataverse.util.SystemConfig;
import java.io.InputStream;
-import javax.ejb.EJB;
-import javax.inject.Inject;
+import jakarta.ejb.EJB;
+import jakarta.inject.Inject;
import java.util.*;
import java.util.logging.Level;
import java.util.logging.Logger;
diff --git a/src/main/java/edu/harvard/iq/dataverse/AlternativePersistentIdentifier.java b/src/main/java/edu/harvard/iq/dataverse/AlternativePersistentIdentifier.java
index 6fc7262925a..db3c6029a78 100644
--- a/src/main/java/edu/harvard/iq/dataverse/AlternativePersistentIdentifier.java
+++ b/src/main/java/edu/harvard/iq/dataverse/AlternativePersistentIdentifier.java
@@ -3,14 +3,14 @@
import java.io.Serializable;
import java.util.Date;
-import javax.persistence.Entity;
-import javax.persistence.GeneratedValue;
-import javax.persistence.GenerationType;
-import javax.persistence.Id;
-import javax.persistence.JoinColumn;
-import javax.persistence.ManyToOne;
-import javax.persistence.Temporal;
-import javax.persistence.TemporalType;
+import jakarta.persistence.Entity;
+import jakarta.persistence.GeneratedValue;
+import jakarta.persistence.GenerationType;
+import jakarta.persistence.Id;
+import jakarta.persistence.JoinColumn;
+import jakarta.persistence.ManyToOne;
+import jakarta.persistence.Temporal;
+import jakarta.persistence.TemporalType;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/ApiTokenPage.java b/src/main/java/edu/harvard/iq/dataverse/ApiTokenPage.java
index 4838847e400..16ff4d266d8 100644
--- a/src/main/java/edu/harvard/iq/dataverse/ApiTokenPage.java
+++ b/src/main/java/edu/harvard/iq/dataverse/ApiTokenPage.java
@@ -5,14 +5,14 @@
import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser;
import edu.harvard.iq.dataverse.util.BundleUtil;
import edu.harvard.iq.dataverse.api.Util;
-import java.sql.Timestamp;
+
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Logger;
-import javax.ejb.EJB;
-import javax.faces.view.ViewScoped;
-import javax.inject.Inject;
-import javax.inject.Named;
+import jakarta.ejb.EJB;
+import jakarta.faces.view.ViewScoped;
+import jakarta.inject.Inject;
+import jakarta.inject.Named;
/**
* @todo Rename this to ApiTokenFragment? The separate page is being taken out
diff --git a/src/main/java/edu/harvard/iq/dataverse/AuxiliaryFile.java b/src/main/java/edu/harvard/iq/dataverse/AuxiliaryFile.java
index 344032ef5e3..d03ebbc6f7b 100644
--- a/src/main/java/edu/harvard/iq/dataverse/AuxiliaryFile.java
+++ b/src/main/java/edu/harvard/iq/dataverse/AuxiliaryFile.java
@@ -4,16 +4,16 @@
import edu.harvard.iq.dataverse.util.BundleUtil;
import java.io.Serializable;
import java.util.MissingResourceException;
-import javax.persistence.Entity;
-import javax.persistence.GeneratedValue;
-import javax.persistence.GenerationType;
-import javax.persistence.Id;
-import javax.persistence.JoinColumn;
-import javax.persistence.ManyToOne;
-import javax.persistence.NamedNativeQueries;
-import javax.persistence.NamedNativeQuery;
-import javax.persistence.NamedQueries;
-import javax.persistence.NamedQuery;
+import jakarta.persistence.Entity;
+import jakarta.persistence.GeneratedValue;
+import jakarta.persistence.GenerationType;
+import jakarta.persistence.Id;
+import jakarta.persistence.JoinColumn;
+import jakarta.persistence.ManyToOne;
+import jakarta.persistence.NamedNativeQueries;
+import jakarta.persistence.NamedNativeQuery;
+import jakarta.persistence.NamedQueries;
+import jakarta.persistence.NamedQuery;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/AuxiliaryFileServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/AuxiliaryFileServiceBean.java
index 05f3e209632..8c96f98ce39 100644
--- a/src/main/java/edu/harvard/iq/dataverse/AuxiliaryFileServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/AuxiliaryFileServiceBean.java
@@ -14,19 +14,19 @@
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Logger;
-import javax.ejb.EJB;
-import javax.ejb.Stateless;
-import javax.inject.Named;
-import javax.persistence.EntityManager;
-import javax.persistence.NoResultException;
-import javax.persistence.PersistenceContext;
-import javax.persistence.Query;
-import javax.persistence.TypedQuery;
-import javax.ws.rs.ClientErrorException;
-import javax.ws.rs.InternalServerErrorException;
-import javax.ws.rs.ServerErrorException;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
+import jakarta.ejb.EJB;
+import jakarta.ejb.Stateless;
+import jakarta.inject.Named;
+import jakarta.persistence.EntityManager;
+import jakarta.persistence.NoResultException;
+import jakarta.persistence.PersistenceContext;
+import jakarta.persistence.Query;
+import jakarta.persistence.TypedQuery;
+import jakarta.ws.rs.ClientErrorException;
+import jakarta.ws.rs.InternalServerErrorException;
+import jakarta.ws.rs.ServerErrorException;
+import jakarta.ws.rs.core.MediaType;
+import jakarta.ws.rs.core.Response;
import org.apache.tika.Tika;
diff --git a/src/main/java/edu/harvard/iq/dataverse/BannerMessage.java b/src/main/java/edu/harvard/iq/dataverse/BannerMessage.java
index 4f465168580..214e26965fa 100644
--- a/src/main/java/edu/harvard/iq/dataverse/BannerMessage.java
+++ b/src/main/java/edu/harvard/iq/dataverse/BannerMessage.java
@@ -4,13 +4,13 @@
import edu.harvard.iq.dataverse.util.BundleUtil;
import java.io.Serializable;
import java.util.Collection;
-import javax.persistence.CascadeType;
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.GeneratedValue;
-import javax.persistence.GenerationType;
-import javax.persistence.Id;
-import javax.persistence.OneToMany;
+import jakarta.persistence.CascadeType;
+import jakarta.persistence.Column;
+import jakarta.persistence.Entity;
+import jakarta.persistence.GeneratedValue;
+import jakarta.persistence.GenerationType;
+import jakarta.persistence.Id;
+import jakarta.persistence.OneToMany;
/**
diff --git a/src/main/java/edu/harvard/iq/dataverse/BannerMessageServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/BannerMessageServiceBean.java
index 91b4128c545..0e757998d58 100644
--- a/src/main/java/edu/harvard/iq/dataverse/BannerMessageServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/BannerMessageServiceBean.java
@@ -10,10 +10,10 @@
import java.util.Date;
import java.util.List;
import java.util.logging.Logger;
-import javax.ejb.Stateless;
-import javax.inject.Named;
-import javax.persistence.EntityManager;
-import javax.persistence.PersistenceContext;
+import jakarta.ejb.Stateless;
+import jakarta.inject.Named;
+import jakarta.persistence.EntityManager;
+import jakarta.persistence.PersistenceContext;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/BannerMessageText.java b/src/main/java/edu/harvard/iq/dataverse/BannerMessageText.java
index dbae9a6dc27..ea2dd1b41fc 100644
--- a/src/main/java/edu/harvard/iq/dataverse/BannerMessageText.java
+++ b/src/main/java/edu/harvard/iq/dataverse/BannerMessageText.java
@@ -6,13 +6,13 @@
package edu.harvard.iq.dataverse;
import java.io.Serializable;
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.GeneratedValue;
-import javax.persistence.GenerationType;
-import javax.persistence.Id;
-import javax.persistence.JoinColumn;
-import javax.persistence.ManyToOne;
+import jakarta.persistence.Column;
+import jakarta.persistence.Entity;
+import jakarta.persistence.GeneratedValue;
+import jakarta.persistence.GenerationType;
+import jakarta.persistence.Id;
+import jakarta.persistence.JoinColumn;
+import jakarta.persistence.ManyToOne;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/CitationServlet.java b/src/main/java/edu/harvard/iq/dataverse/CitationServlet.java
index f6b4e3dc99a..68c8d49ad7e 100644
--- a/src/main/java/edu/harvard/iq/dataverse/CitationServlet.java
+++ b/src/main/java/edu/harvard/iq/dataverse/CitationServlet.java
@@ -6,14 +6,14 @@
package edu.harvard.iq.dataverse;
import edu.harvard.iq.dataverse.pidproviders.PidUtil;
-import edu.harvard.iq.dataverse.util.StringUtil;
+
import java.io.IOException;
-import java.io.PrintWriter;
-import javax.ejb.EJB;
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
+
+import jakarta.ejb.EJB;
+import jakarta.servlet.ServletException;
+import jakarta.servlet.http.HttpServlet;
+import jakarta.servlet.http.HttpServletRequest;
+import jakarta.servlet.http.HttpServletResponse;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/ConfigureFragmentBean.java b/src/main/java/edu/harvard/iq/dataverse/ConfigureFragmentBean.java
index d51a73fd2dc..bf509c33995 100644
--- a/src/main/java/edu/harvard/iq/dataverse/ConfigureFragmentBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/ConfigureFragmentBean.java
@@ -16,10 +16,10 @@
import java.sql.Timestamp;
import java.util.logging.Logger;
-import javax.ejb.EJB;
-import javax.faces.view.ViewScoped;
-import javax.inject.Inject;
-import javax.inject.Named;
+import jakarta.ejb.EJB;
+import jakarta.faces.view.ViewScoped;
+import jakarta.inject.Inject;
+import jakarta.inject.Named;
import java.util.Date;
diff --git a/src/main/java/edu/harvard/iq/dataverse/ControlledVocabAlternate.java b/src/main/java/edu/harvard/iq/dataverse/ControlledVocabAlternate.java
index 5d5d9597746..9542cfe3f71 100644
--- a/src/main/java/edu/harvard/iq/dataverse/ControlledVocabAlternate.java
+++ b/src/main/java/edu/harvard/iq/dataverse/ControlledVocabAlternate.java
@@ -7,15 +7,15 @@
import java.io.Serializable;
import java.util.Objects;
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.GeneratedValue;
-import javax.persistence.GenerationType;
-import javax.persistence.Id;
-import javax.persistence.Index;
-import javax.persistence.JoinColumn;
-import javax.persistence.ManyToOne;
-import javax.persistence.Table;
+import jakarta.persistence.Column;
+import jakarta.persistence.Entity;
+import jakarta.persistence.GeneratedValue;
+import jakarta.persistence.GenerationType;
+import jakarta.persistence.Id;
+import jakarta.persistence.Index;
+import jakarta.persistence.JoinColumn;
+import jakarta.persistence.ManyToOne;
+import jakarta.persistence.Table;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/ControlledVocabularyValue.java b/src/main/java/edu/harvard/iq/dataverse/ControlledVocabularyValue.java
index 181d939f4a1..5dcce98a90f 100644
--- a/src/main/java/edu/harvard/iq/dataverse/ControlledVocabularyValue.java
+++ b/src/main/java/edu/harvard/iq/dataverse/ControlledVocabularyValue.java
@@ -17,16 +17,16 @@
import java.util.Objects;
import java.util.logging.Logger;
import java.util.MissingResourceException;
-import javax.persistence.CascadeType;
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.GeneratedValue;
-import javax.persistence.GenerationType;
-import javax.persistence.Id;
-import javax.persistence.Index;
-import javax.persistence.ManyToOne;
-import javax.persistence.OneToMany;
-import javax.persistence.Table;
+import jakarta.persistence.CascadeType;
+import jakarta.persistence.Column;
+import jakarta.persistence.Entity;
+import jakarta.persistence.GeneratedValue;
+import jakarta.persistence.GenerationType;
+import jakarta.persistence.Id;
+import jakarta.persistence.Index;
+import jakarta.persistence.ManyToOne;
+import jakarta.persistence.OneToMany;
+import jakarta.persistence.Table;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/ControlledVocabularyValueConverter.java b/src/main/java/edu/harvard/iq/dataverse/ControlledVocabularyValueConverter.java
index 1d530e136ba..eadc13721b3 100644
--- a/src/main/java/edu/harvard/iq/dataverse/ControlledVocabularyValueConverter.java
+++ b/src/main/java/edu/harvard/iq/dataverse/ControlledVocabularyValueConverter.java
@@ -5,13 +5,13 @@
*/
package edu.harvard.iq.dataverse;
-import javax.ejb.EJB;
-import javax.enterprise.inject.spi.CDI;
+import jakarta.ejb.EJB;
+import jakarta.enterprise.inject.spi.CDI;
-import javax.faces.component.UIComponent;
-import javax.faces.context.FacesContext;
-import javax.faces.convert.Converter;
-import javax.faces.convert.FacesConverter;
+import jakarta.faces.component.UIComponent;
+import jakarta.faces.context.FacesContext;
+import jakarta.faces.convert.Converter;
+import jakarta.faces.convert.FacesConverter;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/ControlledVocabularyValueServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/ControlledVocabularyValueServiceBean.java
index 0e9501414d0..4255c3b2dbc 100644
--- a/src/main/java/edu/harvard/iq/dataverse/ControlledVocabularyValueServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/ControlledVocabularyValueServiceBean.java
@@ -6,11 +6,11 @@
package edu.harvard.iq.dataverse;
import java.util.List;
-import javax.ejb.Stateless;
-import javax.inject.Named;
-import javax.persistence.EntityManager;
-import javax.persistence.PersistenceContext;
-import javax.persistence.TypedQuery;
+import jakarta.ejb.Stateless;
+import jakarta.inject.Named;
+import jakarta.persistence.EntityManager;
+import jakarta.persistence.PersistenceContext;
+import jakarta.persistence.TypedQuery;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/CustomQuestion.java b/src/main/java/edu/harvard/iq/dataverse/CustomQuestion.java
index 64723fff79a..2cb6f27c3e4 100644
--- a/src/main/java/edu/harvard/iq/dataverse/CustomQuestion.java
+++ b/src/main/java/edu/harvard/iq/dataverse/CustomQuestion.java
@@ -1,7 +1,7 @@
package edu.harvard.iq.dataverse;
import java.io.Serializable;
import java.util.List;
-import javax.persistence.*;
+import jakarta.persistence.*;
import org.hibernate.validator.constraints.NotBlank;
/**
diff --git a/src/main/java/edu/harvard/iq/dataverse/CustomQuestionResponse.java b/src/main/java/edu/harvard/iq/dataverse/CustomQuestionResponse.java
index 32af06014a7..f19ee3c3fc7 100644
--- a/src/main/java/edu/harvard/iq/dataverse/CustomQuestionResponse.java
+++ b/src/main/java/edu/harvard/iq/dataverse/CustomQuestionResponse.java
@@ -7,8 +7,8 @@
import java.io.Serializable;
import java.util.List;
-import javax.faces.model.SelectItem;
-import javax.persistence.*;
+import jakarta.faces.model.SelectItem;
+import jakarta.persistence.*;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/CustomQuestionValue.java b/src/main/java/edu/harvard/iq/dataverse/CustomQuestionValue.java
index a5329c8b96d..f3a6b83b53f 100644
--- a/src/main/java/edu/harvard/iq/dataverse/CustomQuestionValue.java
+++ b/src/main/java/edu/harvard/iq/dataverse/CustomQuestionValue.java
@@ -1,7 +1,7 @@
package edu.harvard.iq.dataverse;
import java.io.Serializable;
-import javax.persistence.*;
+import jakarta.persistence.*;
import org.hibernate.validator.constraints.NotBlank;
/**
diff --git a/src/main/java/edu/harvard/iq/dataverse/CustomizationFilesServlet.java b/src/main/java/edu/harvard/iq/dataverse/CustomizationFilesServlet.java
index 713d365ba0f..9dd524127d7 100644
--- a/src/main/java/edu/harvard/iq/dataverse/CustomizationFilesServlet.java
+++ b/src/main/java/edu/harvard/iq/dataverse/CustomizationFilesServlet.java
@@ -14,13 +14,13 @@
import java.io.PrintWriter;
import java.nio.file.Path;
import java.nio.file.Paths;
-import javax.servlet.ServletException;
-import javax.servlet.annotation.WebServlet;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
+import jakarta.servlet.ServletException;
+import jakarta.servlet.annotation.WebServlet;
+import jakarta.servlet.http.HttpServlet;
+import jakarta.servlet.http.HttpServletRequest;
+import jakarta.servlet.http.HttpServletResponse;
import edu.harvard.iq.dataverse.settings.SettingsServiceBean;
-import javax.ejb.EJB;
+import jakarta.ejb.EJB;
import org.apache.commons.io.IOUtils;
/**
diff --git a/src/main/java/edu/harvard/iq/dataverse/DOIDataCiteRegisterCache.java b/src/main/java/edu/harvard/iq/dataverse/DOIDataCiteRegisterCache.java
index 7ccd4adb78f..7c75b1a4da6 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DOIDataCiteRegisterCache.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DOIDataCiteRegisterCache.java
@@ -7,14 +7,14 @@
import java.io.Serializable;
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.GeneratedValue;
-import javax.persistence.GenerationType;
-import javax.persistence.Id;
-import javax.persistence.Lob;
-import javax.persistence.NamedQueries;
-import javax.persistence.NamedQuery;
+import jakarta.persistence.Column;
+import jakarta.persistence.Entity;
+import jakarta.persistence.GeneratedValue;
+import jakarta.persistence.GenerationType;
+import jakarta.persistence.Id;
+import jakarta.persistence.Lob;
+import jakarta.persistence.NamedQueries;
+import jakarta.persistence.NamedQuery;
import org.hibernate.validator.constraints.NotBlank;
/**
diff --git a/src/main/java/edu/harvard/iq/dataverse/DOIDataCiteRegisterService.java b/src/main/java/edu/harvard/iq/dataverse/DOIDataCiteRegisterService.java
index b748897dafe..9ecc4a3ecc9 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DOIDataCiteRegisterService.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DOIDataCiteRegisterService.java
@@ -18,11 +18,11 @@
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
-import javax.ejb.EJB;
-import javax.ejb.Stateless;
-import javax.persistence.EntityManager;
-import javax.persistence.PersistenceContext;
-import javax.persistence.TypedQuery;
+import jakarta.ejb.EJB;
+import jakarta.ejb.Stateless;
+import jakarta.persistence.EntityManager;
+import jakarta.persistence.PersistenceContext;
+import jakarta.persistence.TypedQuery;
import edu.harvard.iq.dataverse.settings.JvmSettings;
import org.apache.commons.text.StringEscapeUtils;
diff --git a/src/main/java/edu/harvard/iq/dataverse/DOIDataCiteServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/DOIDataCiteServiceBean.java
index fa0a745d80f..48786b41824 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DOIDataCiteServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DOIDataCiteServiceBean.java
@@ -3,7 +3,6 @@
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
-import java.util.ArrayList;
import java.util.Base64;
import java.util.HashMap;
import java.util.List;
@@ -11,8 +10,8 @@
import java.util.logging.Level;
import java.util.logging.Logger;
-import javax.ejb.EJB;
-import javax.ejb.Stateless;
+import jakarta.ejb.EJB;
+import jakarta.ejb.Stateless;
import edu.harvard.iq.dataverse.settings.JvmSettings;
import org.apache.commons.httpclient.HttpException;
diff --git a/src/main/java/edu/harvard/iq/dataverse/DOIEZIdServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/DOIEZIdServiceBean.java
index d9b0fde15da..86b74b72f30 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DOIEZIdServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DOIEZIdServiceBean.java
@@ -7,7 +7,7 @@
import java.util.logging.Level;
import java.util.logging.Logger;
-import javax.ejb.Stateless;
+import jakarta.ejb.Stateless;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/DashboardPage.java b/src/main/java/edu/harvard/iq/dataverse/DashboardPage.java
index 99c7951c96e..c37c3f52bc7 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DashboardPage.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DashboardPage.java
@@ -5,23 +5,21 @@
*/
package edu.harvard.iq.dataverse;
-import edu.harvard.iq.dataverse.authorization.AuthenticationServiceBean;
import edu.harvard.iq.dataverse.harvest.client.HarvestingClient;
import edu.harvard.iq.dataverse.harvest.client.HarvestingClientServiceBean;
import edu.harvard.iq.dataverse.harvest.server.OAISet;
import edu.harvard.iq.dataverse.harvest.server.OAISetServiceBean;
-import static edu.harvard.iq.dataverse.util.JsfHelper.JH;
import edu.harvard.iq.dataverse.util.BundleUtil;
import edu.harvard.iq.dataverse.util.SystemConfig;
import java.util.List;
import java.util.logging.Logger;
-import javax.ejb.EJB;
-import javax.faces.application.FacesMessage;
-import javax.faces.context.FacesContext;
-import javax.faces.view.ViewScoped;
-import javax.inject.Inject;
-import javax.inject.Named;
+import jakarta.ejb.EJB;
+import jakarta.faces.application.FacesMessage;
+import jakarta.faces.context.FacesContext;
+import jakarta.faces.view.ViewScoped;
+import jakarta.inject.Inject;
+import jakarta.inject.Named;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/DataCitation.java b/src/main/java/edu/harvard/iq/dataverse/DataCitation.java
index 30e03046822..9b4b89db44f 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DataCitation.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DataCitation.java
@@ -14,7 +14,6 @@
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
-import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
@@ -27,7 +26,7 @@
import java.util.regex.Pattern;
import java.util.stream.Collectors;
-import javax.ejb.EJBException;
+import jakarta.ejb.EJBException;
import javax.xml.stream.XMLOutputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamWriter;
diff --git a/src/main/java/edu/harvard/iq/dataverse/DataFile.java b/src/main/java/edu/harvard/iq/dataverse/DataFile.java
index 4e323496188..0f83ae3c5c8 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DataFile.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DataFile.java
@@ -29,10 +29,10 @@
import java.util.Set;
import java.util.logging.Logger;
import java.util.stream.Collectors;
-import javax.json.Json;
-import javax.json.JsonArrayBuilder;
-import javax.persistence.*;
-import javax.validation.constraints.Pattern;
+import jakarta.json.Json;
+import jakarta.json.JsonArrayBuilder;
+import jakarta.persistence.*;
+import jakarta.validation.constraints.Pattern;
import org.hibernate.validator.constraints.NotBlank;
/**
diff --git a/src/main/java/edu/harvard/iq/dataverse/DataFileCategory.java b/src/main/java/edu/harvard/iq/dataverse/DataFileCategory.java
index f569a69b13a..f5abe9ac78a 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DataFileCategory.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DataFileCategory.java
@@ -10,16 +10,16 @@
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.GeneratedValue;
-import javax.persistence.GenerationType;
-import javax.persistence.Id;
-import javax.persistence.Index;
-import javax.persistence.JoinColumn;
-import javax.persistence.ManyToMany;
-import javax.persistence.ManyToOne;
-import javax.persistence.Table;
+import jakarta.persistence.Column;
+import jakarta.persistence.Entity;
+import jakarta.persistence.GeneratedValue;
+import jakarta.persistence.GenerationType;
+import jakarta.persistence.Id;
+import jakarta.persistence.Index;
+import jakarta.persistence.JoinColumn;
+import jakarta.persistence.ManyToMany;
+import jakarta.persistence.ManyToOne;
+import jakarta.persistence.Table;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/DataFileCategoryServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/DataFileCategoryServiceBean.java
index 3fa4691a6dd..29dcb22c3ec 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DataFileCategoryServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DataFileCategoryServiceBean.java
@@ -3,8 +3,8 @@
import edu.harvard.iq.dataverse.settings.SettingsServiceBean;
import edu.harvard.iq.dataverse.util.BundleUtil;
-import javax.ejb.EJB;
-import javax.ejb.Stateless;
+import jakarta.ejb.EJB;
+import jakarta.ejb.Stateless;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
diff --git a/src/main/java/edu/harvard/iq/dataverse/DataFileConverter.java b/src/main/java/edu/harvard/iq/dataverse/DataFileConverter.java
index 18531f5203d..701e826f12e 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DataFileConverter.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DataFileConverter.java
@@ -1,13 +1,13 @@
package edu.harvard.iq.dataverse;
import java.util.logging.Logger;
-import javax.ejb.EJB;
-import javax.enterprise.inject.spi.CDI;
+import jakarta.ejb.EJB;
+import jakarta.enterprise.inject.spi.CDI;
-import javax.faces.component.UIComponent;
-import javax.faces.context.FacesContext;
-import javax.faces.convert.Converter;
-import javax.faces.convert.FacesConverter;
+import jakarta.faces.component.UIComponent;
+import jakarta.faces.context.FacesContext;
+import jakarta.faces.convert.Converter;
+import jakarta.faces.convert.FacesConverter;
@FacesConverter("dataFileConverter")
public class DataFileConverter implements Converter {
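
For JSF classes such as this converter, only the `jakarta.faces` imports change; the `Converter` contract is the same. A sketch with assumed names (the real `DataFileConverter` resolves entities via CDI, which is omitted here):

import jakarta.faces.component.UIComponent;
import jakarta.faces.context.FacesContext;
import jakarta.faces.convert.Converter;
import jakarta.faces.convert.FacesConverter;

// Hypothetical converter sketch; converter id and class are illustrative.
@FacesConverter("exampleIdConverter")
public class ExampleIdConverter implements Converter {

    @Override
    public Object getAsObject(FacesContext context, UIComponent component, String value) {
        // In a real converter this would resolve an entity by its id.
        return (value == null || value.isEmpty()) ? null : Long.valueOf(value);
    }

    @Override
    public String getAsString(FacesContext context, UIComponent component, Object value) {
        return value == null ? "" : value.toString();
    }
}
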
diff --git a/src/main/java/edu/harvard/iq/dataverse/DataFileServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/DataFileServiceBean.java
index c30bfce368a..98ee3351458 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DataFileServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DataFileServiceBean.java
@@ -21,16 +21,16 @@
import java.util.UUID;
import java.util.logging.Level;
import java.util.logging.Logger;
-import javax.ejb.EJB;
-import javax.ejb.Stateless;
-import javax.ejb.TransactionAttribute;
-import javax.ejb.TransactionAttributeType;
-import javax.inject.Named;
-import javax.persistence.EntityManager;
-import javax.persistence.NoResultException;
-import javax.persistence.PersistenceContext;
-import javax.persistence.Query;
-import javax.persistence.TypedQuery;
+import jakarta.ejb.EJB;
+import jakarta.ejb.Stateless;
+import jakarta.ejb.TransactionAttribute;
+import jakarta.ejb.TransactionAttributeType;
+import jakarta.inject.Named;
+import jakarta.persistence.EntityManager;
+import jakarta.persistence.NoResultException;
+import jakarta.persistence.PersistenceContext;
+import jakarta.persistence.Query;
+import jakarta.persistence.TypedQuery;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/DataFileTag.java b/src/main/java/edu/harvard/iq/dataverse/DataFileTag.java
index 275d47cf1de..f4f66d3c874 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DataFileTag.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DataFileTag.java
@@ -11,15 +11,15 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.GeneratedValue;
-import javax.persistence.GenerationType;
-import javax.persistence.Id;
-import javax.persistence.Index;
-import javax.persistence.JoinColumn;
-import javax.persistence.ManyToOne;
-import javax.persistence.Table;
+import jakarta.persistence.Column;
+import jakarta.persistence.Entity;
+import jakarta.persistence.GeneratedValue;
+import jakarta.persistence.GenerationType;
+import jakarta.persistence.Id;
+import jakarta.persistence.Index;
+import jakarta.persistence.JoinColumn;
+import jakarta.persistence.ManyToOne;
+import jakarta.persistence.Table;
import org.apache.commons.lang3.StringUtils;
/**
diff --git a/src/main/java/edu/harvard/iq/dataverse/DataTable.java b/src/main/java/edu/harvard/iq/dataverse/DataTable.java
index 614e7394583..a17d8c65138 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DataTable.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DataTable.java
@@ -7,26 +7,23 @@
package edu.harvard.iq.dataverse;
import java.io.Serializable;
-import java.util.ArrayList;
import java.util.List;
-import javax.persistence.CascadeType;
-import javax.persistence.Entity;
-import javax.persistence.GeneratedValue;
-import javax.persistence.GenerationType;
-import javax.persistence.Id;
-import javax.persistence.JoinColumn;
-import javax.persistence.ManyToOne;
-import javax.persistence.OneToMany;
-import javax.validation.constraints.Size;
-import javax.persistence.OrderBy;
-import org.hibernate.validator.constraints.NotBlank;
-import org.hibernate.validator.constraints.URL;
+import jakarta.persistence.CascadeType;
+import jakarta.persistence.Entity;
+import jakarta.persistence.GeneratedValue;
+import jakarta.persistence.GenerationType;
+import jakarta.persistence.Id;
+import jakarta.persistence.JoinColumn;
+import jakarta.persistence.ManyToOne;
+import jakarta.persistence.OneToMany;
+import jakarta.validation.constraints.Size;
+import jakarta.persistence.OrderBy;
import edu.harvard.iq.dataverse.datavariable.DataVariable;
import java.util.Objects;
-import javax.persistence.Column;
-import javax.persistence.Index;
-import javax.persistence.Table;
+import jakarta.persistence.Column;
+import jakarta.persistence.Index;
+import jakarta.persistence.Table;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/DataTagsAPITestingBean.java b/src/main/java/edu/harvard/iq/dataverse/DataTagsAPITestingBean.java
index 2f987dde82b..713c86190fc 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DataTagsAPITestingBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DataTagsAPITestingBean.java
@@ -5,11 +5,11 @@
import java.io.Serializable;
import java.util.logging.Level;
import java.util.logging.Logger;
-import javax.ejb.EJB;
-import javax.enterprise.context.SessionScoped;
-import javax.faces.context.FacesContext;
-import javax.inject.Named;
-import javax.json.JsonObject;
+import jakarta.ejb.EJB;
+import jakarta.enterprise.context.SessionScoped;
+import jakarta.faces.context.FacesContext;
+import jakarta.inject.Named;
+import jakarta.json.JsonObject;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/DataTagsContainer.java b/src/main/java/edu/harvard/iq/dataverse/DataTagsContainer.java
index 5cf9c623bde..eeda70c1f17 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DataTagsContainer.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DataTagsContainer.java
@@ -1,7 +1,7 @@
package edu.harvard.iq.dataverse;
-import javax.ejb.Stateless;
-import javax.json.JsonObject;
+import jakarta.ejb.Stateless;
+import jakarta.json.JsonObject;
/**
*
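
JSON-P usage likewise carries over unchanged under `jakarta.json`. A small self-contained sketch with illustrative keys and values:

import jakarta.json.Json;
import jakarta.json.JsonObject;

// Minimal JSON-P usage under the jakarta.json namespace; the keys and
// values here are illustrative only.
public class JsonExample {
    public static void main(String[] args) {
        JsonObject obj = Json.createObjectBuilder()
                .add("name", "example")
                .add("count", 3)
                .build();
        System.out.println(obj); // {"name":"example","count":3}
    }
}
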
diff --git a/src/main/java/edu/harvard/iq/dataverse/Dataset.java b/src/main/java/edu/harvard/iq/dataverse/Dataset.java
index f9c839a0fff..620e66c6c54 100644
--- a/src/main/java/edu/harvard/iq/dataverse/Dataset.java
+++ b/src/main/java/edu/harvard/iq/dataverse/Dataset.java
@@ -17,22 +17,22 @@
import java.util.List;
import java.util.Objects;
import java.util.Set;
-import javax.persistence.CascadeType;
-import javax.persistence.Entity;
-import javax.persistence.Index;
-import javax.persistence.JoinColumn;
-import javax.persistence.ManyToOne;
-import javax.persistence.NamedQueries;
-import javax.persistence.NamedQuery;
-import javax.persistence.NamedStoredProcedureQuery;
-import javax.persistence.OneToMany;
-import javax.persistence.OneToOne;
-import javax.persistence.OrderBy;
-import javax.persistence.ParameterMode;
-import javax.persistence.StoredProcedureParameter;
-import javax.persistence.Table;
-import javax.persistence.Temporal;
-import javax.persistence.TemporalType;
+import jakarta.persistence.CascadeType;
+import jakarta.persistence.Entity;
+import jakarta.persistence.Index;
+import jakarta.persistence.JoinColumn;
+import jakarta.persistence.ManyToOne;
+import jakarta.persistence.NamedQueries;
+import jakarta.persistence.NamedQuery;
+import jakarta.persistence.NamedStoredProcedureQuery;
+import jakarta.persistence.OneToMany;
+import jakarta.persistence.OneToOne;
+import jakarta.persistence.OrderBy;
+import jakarta.persistence.ParameterMode;
+import jakarta.persistence.StoredProcedureParameter;
+import jakarta.persistence.Table;
+import jakarta.persistence.Temporal;
+import jakarta.persistence.TemporalType;
import edu.harvard.iq.dataverse.settings.JvmSettings;
import edu.harvard.iq.dataverse.util.StringUtil;
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetConverter.java b/src/main/java/edu/harvard/iq/dataverse/DatasetConverter.java
index 2d19cf5fe06..b779e084250 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetConverter.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetConverter.java
@@ -6,12 +6,12 @@
package edu.harvard.iq.dataverse;
-import javax.ejb.EJB;
-import javax.enterprise.inject.spi.CDI;
-import javax.faces.component.UIComponent;
-import javax.faces.context.FacesContext;
-import javax.faces.convert.Converter;
-import javax.faces.convert.FacesConverter;
+import jakarta.ejb.EJB;
+import jakarta.enterprise.inject.spi.CDI;
+import jakarta.faces.component.UIComponent;
+import jakarta.faces.context.FacesContext;
+import jakarta.faces.convert.Converter;
+import jakarta.faces.convert.FacesConverter;
@FacesConverter("datasetConverter")
public class DatasetConverter implements Converter {
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetDistributor.java b/src/main/java/edu/harvard/iq/dataverse/DatasetDistributor.java
index 00936b9365a..3252b7f0367 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetDistributor.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetDistributor.java
@@ -7,7 +7,7 @@
package edu.harvard.iq.dataverse;
import java.util.Comparator;
-import javax.persistence.Version;
+import jakarta.persistence.Version;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetField.java b/src/main/java/edu/harvard/iq/dataverse/DatasetField.java
index 31d08f84c02..c836a20893f 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetField.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetField.java
@@ -19,20 +19,20 @@
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
-import javax.persistence.CascadeType;
-import javax.persistence.Entity;
-import javax.persistence.GeneratedValue;
-import javax.persistence.GenerationType;
-import javax.persistence.Id;
-import javax.persistence.Index;
-import javax.persistence.JoinColumn;
-import javax.persistence.JoinTable;
-import javax.persistence.ManyToMany;
-import javax.persistence.ManyToOne;
-import javax.persistence.OneToMany;
-import javax.persistence.OrderBy;
-import javax.persistence.Table;
-import javax.persistence.Transient;
+import jakarta.persistence.CascadeType;
+import jakarta.persistence.Entity;
+import jakarta.persistence.GeneratedValue;
+import jakarta.persistence.GenerationType;
+import jakarta.persistence.Id;
+import jakarta.persistence.Index;
+import jakarta.persistence.JoinColumn;
+import jakarta.persistence.JoinTable;
+import jakarta.persistence.ManyToMany;
+import jakarta.persistence.ManyToOne;
+import jakarta.persistence.OneToMany;
+import jakarta.persistence.OrderBy;
+import jakarta.persistence.Table;
+import jakarta.persistence.Transient;
import org.apache.commons.lang3.StringUtils;
@Entity
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetFieldCompoundValue.java b/src/main/java/edu/harvard/iq/dataverse/DatasetFieldCompoundValue.java
index 5d83f1e4f8d..c679cd7edad 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetFieldCompoundValue.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetFieldCompoundValue.java
@@ -14,17 +14,17 @@
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
-import javax.persistence.CascadeType;
-import javax.persistence.Entity;
-import javax.persistence.GeneratedValue;
-import javax.persistence.GenerationType;
-import javax.persistence.Id;
-import javax.persistence.Index;
-import javax.persistence.ManyToOne;
-import javax.persistence.OneToMany;
-import javax.persistence.OrderBy;
-import javax.persistence.Table;
-import javax.persistence.Transient;
+import jakarta.persistence.CascadeType;
+import jakarta.persistence.Entity;
+import jakarta.persistence.GeneratedValue;
+import jakarta.persistence.GenerationType;
+import jakarta.persistence.Id;
+import jakarta.persistence.Index;
+import jakarta.persistence.ManyToOne;
+import jakarta.persistence.OneToMany;
+import jakarta.persistence.OrderBy;
+import jakarta.persistence.Table;
+import jakarta.persistence.Transient;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetFieldConstant.java b/src/main/java/edu/harvard/iq/dataverse/DatasetFieldConstant.java
index e57a2a1538d..1621b80df55 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetFieldConstant.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetFieldConstant.java
@@ -6,8 +6,8 @@
package edu.harvard.iq.dataverse;
-import javax.enterprise.context.Dependent;
-import javax.inject.Named;
+import jakarta.enterprise.context.Dependent;
+import jakarta.inject.Named;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetFieldDefaultValue.java b/src/main/java/edu/harvard/iq/dataverse/DatasetFieldDefaultValue.java
index bad482dbca9..7746099818e 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetFieldDefaultValue.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetFieldDefaultValue.java
@@ -8,18 +8,18 @@
import java.io.Serializable;
import java.util.Collection;
-import javax.persistence.CascadeType;
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.GeneratedValue;
-import javax.persistence.GenerationType;
-import javax.persistence.Id;
-import javax.persistence.Index;
-import javax.persistence.JoinColumn;
-import javax.persistence.ManyToOne;
-import javax.persistence.OneToMany;
-import javax.persistence.OrderBy;
-import javax.persistence.Table;
+import jakarta.persistence.CascadeType;
+import jakarta.persistence.Column;
+import jakarta.persistence.Entity;
+import jakarta.persistence.GeneratedValue;
+import jakarta.persistence.GenerationType;
+import jakarta.persistence.Id;
+import jakarta.persistence.Index;
+import jakarta.persistence.JoinColumn;
+import jakarta.persistence.ManyToOne;
+import jakarta.persistence.OneToMany;
+import jakarta.persistence.OrderBy;
+import jakarta.persistence.Table;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetFieldServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/DatasetFieldServiceBean.java
index 89f8c11d076..620d4bf3e09 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetFieldServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetFieldServiceBean.java
@@ -17,24 +17,24 @@
import java.util.Set;
import java.util.logging.Logger;
-import javax.ejb.EJB;
-import javax.ejb.Stateless;
-import javax.inject.Named;
-import javax.json.Json;
-import javax.json.JsonArray;
-import javax.json.JsonArrayBuilder;
-import javax.json.JsonException;
-import javax.json.JsonObject;
-import javax.json.JsonObjectBuilder;
-import javax.json.JsonReader;
-import javax.json.JsonString;
-import javax.json.JsonValue;
-import javax.json.JsonValue.ValueType;
-import javax.persistence.EntityManager;
-import javax.persistence.NoResultException;
-import javax.persistence.NonUniqueResultException;
-import javax.persistence.PersistenceContext;
-import javax.persistence.TypedQuery;
+import jakarta.ejb.EJB;
+import jakarta.ejb.Stateless;
+import jakarta.inject.Named;
+import jakarta.json.Json;
+import jakarta.json.JsonArray;
+import jakarta.json.JsonArrayBuilder;
+import jakarta.json.JsonException;
+import jakarta.json.JsonObject;
+import jakarta.json.JsonObjectBuilder;
+import jakarta.json.JsonReader;
+import jakarta.json.JsonString;
+import jakarta.json.JsonValue;
+import jakarta.json.JsonValue.ValueType;
+import jakarta.persistence.EntityManager;
+import jakarta.persistence.NoResultException;
+import jakarta.persistence.NonUniqueResultException;
+import jakarta.persistence.PersistenceContext;
+import jakarta.persistence.TypedQuery;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.httpclient.HttpException;
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetFieldType.java b/src/main/java/edu/harvard/iq/dataverse/DatasetFieldType.java
index df126514308..824b486a42d 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetFieldType.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetFieldType.java
@@ -13,8 +13,8 @@
import java.util.Set;
import java.util.TreeMap;
import java.util.MissingResourceException;
-import javax.faces.model.SelectItem;
-import javax.persistence.*;
+import jakarta.faces.model.SelectItem;
+import jakarta.persistence.*;
/**
* Defines the meaning and constraints of a metadata field and its values.
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetFieldValidator.java b/src/main/java/edu/harvard/iq/dataverse/DatasetFieldValidator.java
index 3ded24d7a59..6d3fda2812d 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetFieldValidator.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetFieldValidator.java
@@ -5,11 +5,11 @@
*/
package edu.harvard.iq.dataverse;
-import javax.validation.ConstraintValidator;
-import javax.validation.ConstraintValidatorContext;
+import jakarta.validation.ConstraintValidator;
+import jakarta.validation.ConstraintValidatorContext;
import edu.harvard.iq.dataverse.util.BundleUtil;
-import java.util.Collections;
+
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
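
Bean Validation classes keep their contract under `jakarta.validation`; only the namespace moves. A sketch of a constraint/validator pair, where `ExampleNotBlank` is an assumed annotation, not one from this project:

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

import jakarta.validation.Constraint;
import jakarta.validation.ConstraintValidator;
import jakarta.validation.ConstraintValidatorContext;
import jakarta.validation.Payload;

// Hypothetical constraint + validator pair; the Bean Validation contract
// (message/groups/payload members, isValid signature) is unchanged.
@Target(ElementType.FIELD)
@Retention(RetentionPolicy.RUNTIME)
@Constraint(validatedBy = ExampleNotBlank.Validator.class)
@interface ExampleNotBlank {
    String message() default "may not be blank";
    Class<?>[] groups() default {};
    Class<? extends Payload>[] payload() default {};

    class Validator implements ConstraintValidator<ExampleNotBlank, String> {
        @Override
        public boolean isValid(String value, ConstraintValidatorContext context) {
            // Null and whitespace-only strings fail validation.
            return value != null && !value.trim().isEmpty();
        }
    }
}
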
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetFieldValue.java b/src/main/java/edu/harvard/iq/dataverse/DatasetFieldValue.java
index 2447a6478fd..1064187ccd6 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetFieldValue.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetFieldValue.java
@@ -10,17 +10,17 @@
import edu.harvard.iq.dataverse.util.MarkupChecker;
import java.io.Serializable;
import java.util.Comparator;
-import java.util.ResourceBundle;
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.GeneratedValue;
-import javax.persistence.GenerationType;
-import javax.persistence.Id;
-import javax.persistence.Index;
-import javax.persistence.JoinColumn;
-import javax.persistence.ManyToOne;
-import javax.persistence.Table;
-import javax.persistence.Transient;
+
+import jakarta.persistence.Column;
+import jakarta.persistence.Entity;
+import jakarta.persistence.GeneratedValue;
+import jakarta.persistence.GenerationType;
+import jakarta.persistence.Id;
+import jakarta.persistence.Index;
+import jakarta.persistence.JoinColumn;
+import jakarta.persistence.ManyToOne;
+import jakarta.persistence.Table;
+import jakarta.persistence.Transient;
import org.apache.commons.lang3.StringUtils;
/**
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetFieldValueValidator.java b/src/main/java/edu/harvard/iq/dataverse/DatasetFieldValueValidator.java
index 132955859ff..b6c21014f04 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetFieldValueValidator.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetFieldValueValidator.java
@@ -13,14 +13,13 @@
import java.util.GregorianCalendar;
import java.util.logging.Logger;
import java.util.regex.Pattern;
-import javax.validation.ConstraintValidator;
-import javax.validation.ConstraintValidatorContext;
+import jakarta.validation.ConstraintValidator;
+import jakarta.validation.ConstraintValidatorContext;
import edu.harvard.iq.dataverse.util.BundleUtil;
import edu.harvard.iq.dataverse.validation.EMailValidator;
import edu.harvard.iq.dataverse.validation.URLValidator;
import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.validator.routines.UrlValidator;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetLinkingDataverse.java b/src/main/java/edu/harvard/iq/dataverse/DatasetLinkingDataverse.java
index 8f8e9b103c1..dec07a09643 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetLinkingDataverse.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetLinkingDataverse.java
@@ -2,19 +2,19 @@
import java.io.Serializable;
import java.util.Date;
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.GeneratedValue;
-import javax.persistence.GenerationType;
-import javax.persistence.Id;
-import javax.persistence.Index;
-import javax.persistence.JoinColumn;
-import javax.persistence.NamedQueries;
-import javax.persistence.NamedQuery;
-import javax.persistence.OneToOne;
-import javax.persistence.Table;
-import javax.persistence.Temporal;
-import javax.persistence.TemporalType;
+import jakarta.persistence.Column;
+import jakarta.persistence.Entity;
+import jakarta.persistence.GeneratedValue;
+import jakarta.persistence.GenerationType;
+import jakarta.persistence.Id;
+import jakarta.persistence.Index;
+import jakarta.persistence.JoinColumn;
+import jakarta.persistence.NamedQueries;
+import jakarta.persistence.NamedQuery;
+import jakarta.persistence.OneToOne;
+import jakarta.persistence.Table;
+import jakarta.persistence.Temporal;
+import jakarta.persistence.TemporalType;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetLinkingServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/DatasetLinkingServiceBean.java
index 3789efcd443..39c82bfa3f1 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetLinkingServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetLinkingServiceBean.java
@@ -8,12 +8,13 @@
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Logger;
-import javax.ejb.Stateless;
-import javax.inject.Named;
-import javax.persistence.EntityManager;
-import javax.persistence.PersistenceContext;
-import javax.persistence.Query;
-import javax.persistence.TypedQuery;
+import jakarta.ejb.Stateless;
+import jakarta.inject.Named;
+import jakarta.persistence.EntityManager;
+import jakarta.persistence.NoResultException;
+import jakarta.persistence.PersistenceContext;
+import jakarta.persistence.Query;
+import jakarta.persistence.TypedQuery;
/**
*
@@ -63,7 +64,7 @@ public DatasetLinkingDataverse findDatasetLinkingDataverse(Long datasetId, Long
.setParameter("datasetId", datasetId)
.setParameter("linkingDataverseId", linkingDataverseId)
.getSingleResult();
- } catch (javax.persistence.NoResultException e) {
+ } catch (NoResultException e) {
logger.fine("no datasetLinkingDataverse found for datasetId " + datasetId + " and linkingDataverseId " + linkingDataverseId);
return null;
}
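
Beyond the import swap, this hunk replaces the fully qualified `javax.persistence.NoResultException` in the catch clause with the newly imported Jakarta class. The lookup-or-null idiom itself is untouched; a sketch under assumed entity and field names:

import jakarta.persistence.Entity;
import jakarta.persistence.EntityManager;
import jakarta.persistence.Id;
import jakarta.persistence.NoResultException;

// Assumed demo entity; only the catch pattern mirrors the code above.
@Entity
class ExampleEntity {
    @Id Long id;
    String code;
}

class ExampleLookup {
    ExampleEntity findByCode(EntityManager em, String code) {
        try {
            return em.createQuery(
                        "SELECT e FROM ExampleEntity e WHERE e.code = :code",
                        ExampleEntity.class)
                     .setParameter("code", code)
                     .getSingleResult();
        } catch (NoResultException e) {
            // getSingleResult() throws instead of returning null; map the
            // "no matching row" case to null, as the service beans above do.
            return null;
        }
    }
}
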
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetLock.java b/src/main/java/edu/harvard/iq/dataverse/DatasetLock.java
index 7b857545c20..cc0078ecbc5 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetLock.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetLock.java
@@ -20,25 +20,24 @@
package edu.harvard.iq.dataverse;
-import static edu.harvard.iq.dataverse.DatasetLock.Reason.Workflow;
import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser;
import java.util.Date;
import java.io.Serializable;
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.EnumType;
-import javax.persistence.Enumerated;
-import javax.persistence.GeneratedValue;
-import javax.persistence.GenerationType;
-import javax.persistence.Id;
-import javax.persistence.Index;
-import javax.persistence.JoinColumn;
-import javax.persistence.ManyToOne;
-import javax.persistence.Table;
-import javax.persistence.Temporal;
-import javax.persistence.TemporalType;
-import javax.persistence.NamedQueries;
-import javax.persistence.NamedQuery;
+import jakarta.persistence.Column;
+import jakarta.persistence.Entity;
+import jakarta.persistence.EnumType;
+import jakarta.persistence.Enumerated;
+import jakarta.persistence.GeneratedValue;
+import jakarta.persistence.GenerationType;
+import jakarta.persistence.Id;
+import jakarta.persistence.Index;
+import jakarta.persistence.JoinColumn;
+import jakarta.persistence.ManyToOne;
+import jakarta.persistence.Table;
+import jakarta.persistence.Temporal;
+import jakarta.persistence.TemporalType;
+import jakarta.persistence.NamedQueries;
+import jakarta.persistence.NamedQuery;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetPage.java b/src/main/java/edu/harvard/iq/dataverse/DatasetPage.java
index 393c6cfad16..d20175b6e1a 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetPage.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetPage.java
@@ -58,7 +58,6 @@
import edu.harvard.iq.dataverse.util.StringUtil;
import edu.harvard.iq.dataverse.util.SystemConfig;
-import edu.harvard.iq.dataverse.util.URLTokenUtil;
import edu.harvard.iq.dataverse.util.WebloaderUtil;
import edu.harvard.iq.dataverse.validation.URLValidator;
import edu.harvard.iq.dataverse.workflows.WorkflowComment;
@@ -84,27 +83,27 @@
import java.util.logging.Logger;
import java.util.stream.Collectors;
-import javax.ejb.EJB;
-import javax.ejb.EJBException;
-import javax.faces.application.FacesMessage;
-import javax.faces.context.FacesContext;
-import javax.faces.event.ActionEvent;
-import javax.faces.event.ValueChangeEvent;
-import javax.faces.view.ViewScoped;
-import javax.inject.Inject;
-import javax.inject.Named;
+import jakarta.ejb.EJB;
+import jakarta.ejb.EJBException;
+import jakarta.faces.application.FacesMessage;
+import jakarta.faces.context.FacesContext;
+import jakarta.faces.event.ActionEvent;
+import jakarta.faces.event.ValueChangeEvent;
+import jakarta.faces.view.ViewScoped;
+import jakarta.inject.Inject;
+import jakarta.inject.Named;
import org.apache.commons.lang3.StringUtils;
import org.primefaces.event.FileUploadEvent;
import org.primefaces.model.file.UploadedFile;
-import javax.validation.ConstraintViolation;
+import jakarta.validation.ConstraintViolation;
import org.apache.commons.httpclient.HttpClient;
//import org.primefaces.context.RequestContext;
import java.util.Arrays;
import java.util.HashSet;
-import javax.faces.model.SelectItem;
-import javax.faces.validator.ValidatorException;
+import jakarta.faces.model.SelectItem;
+import jakarta.faces.validator.ValidatorException;
import java.util.logging.Level;
import edu.harvard.iq.dataverse.engine.command.exception.IllegalCommandException;
@@ -125,12 +124,12 @@
import edu.harvard.iq.dataverse.makedatacount.MakeDataCountLoggingServiceBean;
import edu.harvard.iq.dataverse.makedatacount.MakeDataCountLoggingServiceBean.MakeDataCountEntry;
import java.util.Collections;
-import javax.faces.component.UIComponent;
-import javax.faces.component.UIInput;
+import jakarta.faces.component.UIComponent;
+import jakarta.faces.component.UIInput;
-import javax.faces.event.AjaxBehaviorEvent;
-import javax.servlet.ServletOutputStream;
-import javax.servlet.http.HttpServletResponse;
+import jakarta.faces.event.AjaxBehaviorEvent;
+import jakarta.servlet.ServletOutputStream;
+import jakarta.servlet.http.HttpServletResponse;
import org.apache.commons.text.StringEscapeUtils;
import org.apache.commons.lang3.mutable.MutableBoolean;
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetRelMaterial.java b/src/main/java/edu/harvard/iq/dataverse/DatasetRelMaterial.java
index f432e4f5bbf..53ea62f566a 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetRelMaterial.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetRelMaterial.java
@@ -6,14 +6,14 @@
package edu.harvard.iq.dataverse;
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.GeneratedValue;
-import javax.persistence.GenerationType;
-import javax.persistence.Id;
-import javax.persistence.JoinColumn;
-import javax.persistence.ManyToOne;
-import javax.persistence.Version;
+import jakarta.persistence.Column;
+import jakarta.persistence.Entity;
+import jakarta.persistence.GeneratedValue;
+import jakarta.persistence.GenerationType;
+import jakarta.persistence.Id;
+import jakarta.persistence.JoinColumn;
+import jakarta.persistence.ManyToOne;
+import jakarta.persistence.Version;
/**
*
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/DatasetServiceBean.java
index c93236f347b..52eb5868c35 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetServiceBean.java
@@ -31,22 +31,21 @@
import java.util.logging.FileHandler;
import java.util.logging.Level;
import java.util.logging.Logger;
-import javax.ejb.Asynchronous;
-import javax.ejb.EJB;
-import javax.ejb.EJBException;
-import javax.ejb.Stateless;
-import javax.ejb.TransactionAttribute;
-import javax.ejb.TransactionAttributeType;
-import javax.inject.Named;
-import javax.persistence.EntityManager;
-import javax.persistence.LockModeType;
-import javax.persistence.NoResultException;
-import javax.persistence.PersistenceContext;
-import javax.persistence.Query;
-import javax.persistence.StoredProcedureQuery;
-import javax.persistence.TypedQuery;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.ocpsoft.common.util.Strings;
+import jakarta.ejb.Asynchronous;
+import jakarta.ejb.EJB;
+import jakarta.ejb.EJBException;
+import jakarta.ejb.Stateless;
+import jakarta.ejb.TransactionAttribute;
+import jakarta.ejb.TransactionAttributeType;
+import jakarta.inject.Named;
+import jakarta.persistence.EntityManager;
+import jakarta.persistence.LockModeType;
+import jakarta.persistence.NoResultException;
+import jakarta.persistence.PersistenceContext;
+import jakarta.persistence.Query;
+import jakarta.persistence.StoredProcedureQuery;
+import jakarta.persistence.TypedQuery;
+import org.apache.commons.lang3.StringUtils;
/**
*
@@ -119,7 +118,7 @@ public Dataset find(Object pk) {
public Dataset findDeep(Object pk) {
return (Dataset) em.createNamedQuery("Dataset.findById")
.setParameter("id", pk)
- // Optimization hints: retrieve all data in one query; this prevents point queries when iterating over the files
+ // Optimization hints: retrieve all data in one query; this prevents point queries when iterating over the files
.setHint("eclipselink.left-join-fetch", "o.files.ingestRequest")
.setHint("eclipselink.left-join-fetch", "o.files.thumbnailForDataset")
.setHint("eclipselink.left-join-fetch", "o.files.dataTables")
@@ -331,7 +330,7 @@ public Dataset findByGlobalId(String globalId) {
* in the dataset components, a ConstraintViolationException will be thrown,
* which can be further parsed to detect the specific offending values.
* @param id the id of the dataset
- * @throws javax.validation.ConstraintViolationException
+ * @throws ConstraintViolationException
*/
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
@@ -399,7 +398,7 @@ public DatasetVersionUser getDatasetVersionUser(DatasetVersion version, User use
query.setParameter("userId", au.getId());
try {
return query.getSingleResult();
- } catch (javax.persistence.NoResultException e) {
+ } catch (NoResultException e) {
return null;
}
}
@@ -514,7 +513,7 @@ public List listLocks(DatasetLock.Reason lockType, AuthenticatedUse
}
try {
return query.getResultList();
- } catch (javax.persistence.NoResultException e) {
+ } catch (NoResultException e) {
return null;
}
}
@@ -595,7 +594,7 @@ public Map getArchiveDescriptionsForHarvestedDatasets(Set da
return null;
}
- String datasetIdStr = Strings.join(datasetIds, ", ");
+ String datasetIdStr = StringUtils.join(datasetIds, ", ");
String qstr = "SELECT d.id, h.archiveDescription FROM harvestingClient h, dataset d WHERE d.harvestingClient_id = h.id AND d.id IN (" + datasetIdStr + ")";
List
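
The final hunk also swaps `org.ocpsoft.common.util.Strings.join` for `org.apache.commons.lang3.StringUtils.join`, a drop-in replacement for this call shape that drops the ocpsoft utility dependency in favor of commons-lang3, which the project already uses. A minimal demonstration:

import java.util.List;

import org.apache.commons.lang3.StringUtils;

// Shows that StringUtils.join(Iterable, String) produces the same
// comma-separated id list the removed Strings.join call built above.
public class JoinExample {
    public static void main(String[] args) {
        List<Long> datasetIds = List.of(1L, 2L, 3L);
        System.out.println(StringUtils.join(datasetIds, ", ")); // prints: 1, 2, 3
    }
}
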