From 84315dc7b7c522826a7284294c82d5827f7071a7 Mon Sep 17 00:00:00 2001
From: yaroslav-epam <138673581+yaroslav-epam@users.noreply.github.com>
Date: Fri, 19 Jan 2024 14:44:48 +0200
Subject: [PATCH 1/2] MODDATAIMP-957: remove initial saving of records in SRS
 (#845)

* MODSOURMAN-1021: add endpoint to get IncomingRecord by id with DAO and service layers functionality (#823)

* MODSOURMAN-1022: remove step of initial saving of incoming records to SRS (#826)

* MODSOURMAN-1070: Fill in Journal Records for created MARC when INSTANCE_CREATED event received (#834)

* MODSOURMAN-1070: Filled in two journal records, for the Instance and the MARC bib, during the INSTANCE_CREATED event type

* MODSOURMAN-1063: remove entity type set for journal records to not fetch them with get_job_log_entries (#844)

* MODSOURMAN-1063: Update RecordProcessingLogDto to contain incoming record id

---------

Co-authored-by: Yaroslav_Kiriak
Co-authored-by: Maksat <144414992+Maksat-Galymzhan@users.noreply.github.com>
Co-authored-by: Volodymyr Rohach
---
 NEWS.md                                       |    5 +
 descriptors/ModuleDescriptor-template.json    |   15 +
 .../java/org/folio/dao/IncomingRecordDao.java |   32 +
 .../org/folio/dao/IncomingRecordDaoImpl.java  |   79 +
 .../org/folio/dao/JobExecutionDaoImpl.java    |    6 +-
 .../java/org/folio/dao/JournalRecordDao.java  |    6 +-
 .../org/folio/dao/JournalRecordDaoImpl.java   |  161 +-
 .../folio/rest/impl/MetadataProviderImpl.java |   25 +-
 .../services/ChangeEngineServiceImpl.java     |  149 +-
 .../folio/services/IncomingRecordService.java |   29 +
 .../services/IncomingRecordServiceImpl.java   |   27 +
 .../folio/services/JournalRecordService.java  |   16 +-
 .../services/JournalRecordServiceImpl.java    |   14 +-
 .../RecordsPublishingServiceImpl.java         |   51 +-
 .../folio/services/journal/JournalUtil.java   |   83 +-
 .../StoredRecordChunksKafkaHandler.java       |   11 +-
 .../create_get_job_log_entries_function.sql   |  441 ++++-
 .../create_incoming_records_table.sql         |   10 +
 .../templates/db_scripts/schema.json          |   10 +
 .../folio/dao/IncomingRecordDaoImplTest.java  |   88 +
 .../org/folio/rest/impl/AbstractRestTest.java |   16 +-
 .../changeManager/ChangeManagerAPITest.java   |  132 +-
 .../MetaDataProviderJobLogEntriesAPITest.java |  772 +-------
 ...rRecordProcessingLogCollectionAPITest.java | 1630 +++++++++++++++++
 .../MetadataProviderJobExecutionAPITest.java  |   41 +
 .../services/ChangeEngineServiceImplTest.java |   54 +-
 ...tDrivenChunkProcessingServiceImplTest.java |   19 +-
 .../IncomingRecordServiceImplUnitTest.java    |   40 +
 .../org/folio/services/JournalUtilTest.java   |  124 ++
 ...ProcessedEventHandlingServiceImplTest.java |   37 +-
 ...DataImportJournalConsumerVerticleTest.java |   15 +-
 .../RawMarcChunkConsumersVerticleTest.java    |   38 +-
 ...toredRecordChunkConsumersVerticleTest.java |   12 +-
 .../util/MarcImportEventsHandlerTest.java     |    6 +-
 ramls/change-manager.raml                     |    1 +
 ramls/metadata-provider.raml                  |   19 +-
 ramls/raml-storage                            |    2 +-
 37 files changed, 3095 insertions(+), 1121 deletions(-)
 create mode 100644 mod-source-record-manager-server/src/main/java/org/folio/dao/IncomingRecordDao.java
 create mode 100644 mod-source-record-manager-server/src/main/java/org/folio/dao/IncomingRecordDaoImpl.java
 create mode 100644 mod-source-record-manager-server/src/main/java/org/folio/services/IncomingRecordService.java
 create mode 100644 mod-source-record-manager-server/src/main/java/org/folio/services/IncomingRecordServiceImpl.java
 create mode 100644 mod-source-record-manager-server/src/main/resources/templates/db_scripts/create_incoming_records_table.sql
 create mode 100644 mod-source-record-manager-server/src/test/java/org/folio/dao/IncomingRecordDaoImplTest.java
 create mode 100644 mod-source-record-manager-server/src/test/java/org/folio/rest/impl/metadataProvider/MetaDataProviderRecordProcessingLogCollectionAPITest.java
 create mode 100644 mod-source-record-manager-server/src/test/java/org/folio/services/IncomingRecordServiceImplUnitTest.java

diff --git a/NEWS.md b/NEWS.md
index 17d6c81ff..66af50300 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,5 +1,9 @@
 ## 2023-xx-xx v3.8.0-SNAPSHOT
 * [MODSOURMAN-1085](https://issues.folio.org/browse/MODSOURMAN-1085) MARC record with a 100 tag without a $a is being discarded on import.
+* [MODSOURMAN-1020](https://issues.folio.org/browse/MODSOURMAN-1020) Add table to save incoming records for DI logs
+* [MODSOURMAN-1021](https://issues.folio.org/browse/MODSOURMAN-1021) Provide endpoint for getting parsed content for DI log
+* [MODSOURMAN-1022](https://issues.folio.org/browse/MODSOURMAN-1022) Remove step of initial saving of incoming records to SRS
+* [MODSOURMAN-1070](https://issues.folio.org/browse/MODSOURMAN-1070) Fill in Journal Records for created MARC when INSTANCE_CREATED event received
 * [MODSOURMAN-1030](https://issues.folio.org/browse/MODSOURMAN-1030) The number of updated records is not correctly displayed in the 'SRS Marc' column of the 'Log summary' table
 * [MODSOURMAN-976](https://issues.folio.org/browse/MODSOURMAN-976) Incorrect error counts
 * [MODSOURMAN-1093](https://issues.folio.org/browse/MODSOURMAN-1093) EventHandlingUtil hangs forever on error
@@ -7,6 +11,7 @@
 * [MODSOURMAN-1091](https://issues.folio.org/browse/MODSOURMAN-1091) The number '1' is displayed in the 'Instance' cell of the 'Updated' row in an individual import job's log
 * [MODSOURMAN-1108](https://issues.folio.org/browse/MODSOURMAN-1108) MARC authority record is not created when using a job profile with a match profile and an action only on the no-match branch
 * [MODSOURMAN-1106](https://issues.folio.org/browse/MODSOURMAN-1106) The status of Instance is '-' in the Import log after uploading a file; the numbers of updated SRS and Instance records are not displayed in the Summary table.
+* [MODSOURMAN-1063](https://issues.folio.org/browse/MODSOURMAN-1063) Update RecordProcessingLogDto to contain incoming record id
 
 ## 2023-10-13 v3.7.0
 * [MODSOURMAN-1045](https://issues.folio.org/browse/MODSOURMAN-1045) Allow create action with non-matches for instance without match profile

diff --git a/descriptors/ModuleDescriptor-template.json b/descriptors/ModuleDescriptor-template.json
index 52d176dea..87208d025 100644
--- a/descriptors/ModuleDescriptor-template.json
+++ b/descriptors/ModuleDescriptor-template.json
@@ -501,6 +501,15 @@
         "permissionsRequired": [
           "metadata-provider.jobexecutions.get"
         ]
+      },
+      {
+        "methods": [
+          "GET"
+        ],
+        "pathPattern": "/metadata-provider/incomingRecords/{recordId}",
+        "permissionsRequired": [
+          "metadata-provider.incomingrecords.get"
+        ]
       }
     ]
   },
@@ -651,6 +660,11 @@
       "displayName": "Metadata Provider - get jobExecution logs",
       "description": "Get JobExecutionLogDto"
     },
+    {
+      "permissionName": "metadata-provider.incomingrecords.get",
+      "displayName": "Metadata Provider - get incoming record",
+      "description": "Get IncomingRecord"
+    },
     {
       "permissionName": "change-manager.jobexecutions.post",
       "displayName": "Change Manager - create jobExecutions",
@@ -718,6 +732,7 @@
       "subPermissions": [
         "metadata-provider.jobexecutions.get",
         "metadata-provider.logs.get",
+        "metadata-provider.incomingrecords.get",
         "change-manager.jobexecutions.post",
         "change-manager.jobexecutions.put",
         "change-manager.jobexecutions.get",
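For anyone trying the new endpoint during review: a minimal Vert.x WebClient call against the new path might look like the sketch below. This is illustrative only — the Okapi URL, tenant, token, and record id are placeholders, and the caller is assumed to hold the new metadata-provider.incomingrecords.get permission.

import io.vertx.core.Vertx;
import io.vertx.ext.web.client.WebClient;

public class IncomingRecordClientExample {
  public static void main(String[] args) {
    WebClient client = WebClient.create(Vertx.vertx());
    String recordId = "11111111-1111-1111-1111-111111111111"; // placeholder id
    client.getAbs("http://localhost:9130/metadata-provider/incomingRecords/" + recordId)
      .putHeader("x-okapi-tenant", "diku") // placeholder tenant
      .putHeader("x-okapi-token", "TOKEN") // placeholder token
      .send(ar -> {
        if (ar.succeeded() && ar.result().statusCode() == 200) {
          System.out.println(ar.result().bodyAsJsonObject()); // the IncomingRecord JSON
        } else {
          // the endpoint responds 404 when no IncomingRecord exists for the id
          System.out.println(ar.succeeded() ? "HTTP " + ar.result().statusCode() : ar.cause().getMessage());
        }
      });
  }
}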
diff --git a/mod-source-record-manager-server/src/main/java/org/folio/dao/IncomingRecordDao.java b/mod-source-record-manager-server/src/main/java/org/folio/dao/IncomingRecordDao.java
new file mode 100644
index 000000000..abc33e5be
--- /dev/null
+++ b/mod-source-record-manager-server/src/main/java/org/folio/dao/IncomingRecordDao.java
@@ -0,0 +1,32 @@
+package org.folio.dao;
+
+import io.vertx.core.Future;
+import io.vertx.sqlclient.Row;
+import io.vertx.sqlclient.RowSet;
+import org.folio.rest.jaxrs.model.IncomingRecord;
+
+import java.util.List;
+import java.util.Optional;
+
+/**
+ * DAO interface for the {@link IncomingRecord} entity
+ */
+public interface IncomingRecordDao {
+
+  /**
+   * Searches for {@link IncomingRecord} by id
+   *
+   * @param id       incomingRecord id
+   * @param tenantId tenant id
+   * @return future with optional of incomingRecord
+   */
+  Future<Optional<IncomingRecord>> getById(String id, String tenantId);
+
+  /**
+   * Saves {@link IncomingRecord} entities into DB
+   *
+   * @param incomingRecords {@link IncomingRecord} entities to save
+   * @param tenantId        tenant id
+   * @return future with created incomingRecords entities represented as row set
+   */
+  Future<List<RowSet<Row>>> saveBatch(List<IncomingRecord> incomingRecords, String tenantId);
+}

diff --git a/mod-source-record-manager-server/src/main/java/org/folio/dao/IncomingRecordDaoImpl.java b/mod-source-record-manager-server/src/main/java/org/folio/dao/IncomingRecordDaoImpl.java
new file mode 100644
index 000000000..966c3be40
--- /dev/null
+++ b/mod-source-record-manager-server/src/main/java/org/folio/dao/IncomingRecordDaoImpl.java
@@ -0,0 +1,79 @@
+package org.folio.dao;
+
+import io.vertx.core.Future;
+import io.vertx.core.Promise;
+import io.vertx.core.json.JsonObject;
+import io.vertx.sqlclient.Row;
+import io.vertx.sqlclient.RowSet;
+import io.vertx.sqlclient.Tuple;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.folio.dao.util.PostgresClientFactory;
+import org.folio.rest.jaxrs.model.IncomingRecord;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Repository;
+
+import java.util.List;
+import java.util.Optional;
+import java.util.UUID;
+
+import static java.lang.String.format;
+import static org.folio.rest.persist.PostgresClient.convertToPsqlStandard;
+
+@Repository
+public class IncomingRecordDaoImpl implements IncomingRecordDao {
+
+  private static final Logger LOGGER = LogManager.getLogger();
+  public static final String INCOMING_RECORDS_TABLE = "incoming_records";
+  private static final String GET_BY_ID_SQL = "SELECT * FROM %s.%s WHERE id = $1";
+  private static final String INSERT_SQL = "INSERT INTO %s.%s (id, job_execution_id, incoming_record) VALUES ($1, $2, $3)";
+
+  @Autowired
+  private PostgresClientFactory pgClientFactory;
+
+  @Override
+  public Future<Optional<IncomingRecord>> getById(String id, String tenantId) {
+    LOGGER.debug("getById:: Get IncomingRecord by id = {} from the {} table", id, INCOMING_RECORDS_TABLE);
+    Promise<RowSet<Row>> promise = Promise.promise();
+    try {
+      String query = format(GET_BY_ID_SQL, convertToPsqlStandard(tenantId), INCOMING_RECORDS_TABLE);
+      pgClientFactory.createInstance(tenantId).selectRead(query, Tuple.of(UUID.fromString(id)), promise);
+    } catch (Exception e) {
+      LOGGER.warn("getById:: Error getting IncomingRecord by id", e);
+      promise.fail(e);
+    }
+    return promise.future().map(rowSet -> rowSet.rowCount() == 0 ? Optional.empty()
+      : Optional.of(mapRowToIncomingRecord(rowSet.iterator().next())));
+  }
+
+  @Override
+  public Future<List<RowSet<Row>>> saveBatch(List<IncomingRecord> incomingRecords, String tenantId) {
+    LOGGER.debug("saveBatch:: Save IncomingRecord entities to the {} table", INCOMING_RECORDS_TABLE);
+    Promise<List<RowSet<Row>>> promise = Promise.promise();
+    try {
+      String query = format(INSERT_SQL, convertToPsqlStandard(tenantId), INCOMING_RECORDS_TABLE);
+      List<Tuple> tuples = incomingRecords.stream().map(this::prepareInsertQueryParameters).toList();
+      LOGGER.debug("saveBatch:: Save query = {}; tuples = {}", query, tuples);
+      pgClientFactory.createInstance(tenantId).execute(query, tuples, promise);
+    } catch (Exception e) {
+      LOGGER.warn("saveBatch:: Error saving IncomingRecord entities", e);
+      promise.fail(e);
+    }
+    return promise.future().onFailure(e -> LOGGER.warn("saveBatch:: Error saving IncomingRecord entities", e));
+  }
+
+  private IncomingRecord mapRowToIncomingRecord(Row row) {
+    JsonObject jsonObject = row.getJsonObject("incoming_record");
+    return new IncomingRecord().withId(String.valueOf(row.getUUID("id")))
+      .withJobExecutionId(String.valueOf(row.getUUID("job_execution_id")))
+      .withRecordType(IncomingRecord.RecordType.fromValue(jsonObject.getString("recordType")))
+      .withOrder(jsonObject.getInteger("order"))
+      .withRawRecordContent(jsonObject.getString("rawRecordContent"))
+      .withParsedRecordContent(jsonObject.getString("parsedRecordContent"));
+  }
+
+  private Tuple prepareInsertQueryParameters(IncomingRecord incomingRecord) {
+    return Tuple.of(UUID.fromString(incomingRecord.getId()), UUID.fromString(incomingRecord.getJobExecutionId()),
+      JsonObject.mapFrom(incomingRecord));
+  }
+}
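A note on the storage model above: the whole DTO is serialized into the incoming_record JSONB column via JsonObject.mapFrom, and mapRowToIncomingRecord reads individual keys back out. The sketch below shows that round trip with plain Vert.x JSON, no database needed; the ids are made up, and MARC_BIB is assumed to be among the enum values generated from the schema.

import io.vertx.core.json.JsonObject;
import org.folio.rest.jaxrs.model.IncomingRecord;

public class IncomingRecordJsonRoundTrip {
  public static void main(String[] args) {
    IncomingRecord incomingRecord = new IncomingRecord()
      .withId("0f4fe235-1d9c-4ae5-9fbb-5a1b0bd0e0b4")             // made-up id
      .withJobExecutionId("5f355b3f-3402-4a06-b070-4d78e1bcfe5f") // made-up id
      .withRecordType(IncomingRecord.RecordType.MARC_BIB)
      .withOrder(0)
      .withRawRecordContent("00026nam a2200025 a 4500...");

    // What lands in the incoming_record JSONB column:
    JsonObject json = JsonObject.mapFrom(incomingRecord);

    // mapRowToIncomingRecord later reads these exact keys back:
    System.out.println(json.getString("recordType"));       // MARC_BIB
    System.out.println(json.getInteger("order"));           // 0
    System.out.println(json.getString("rawRecordContent")); // the raw MARC
  }
}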
diff --git a/mod-source-record-manager-server/src/main/java/org/folio/dao/JobExecutionDaoImpl.java b/mod-source-record-manager-server/src/main/java/org/folio/dao/JobExecutionDaoImpl.java
index 0b95756cb..e3fa04dd3 100644
--- a/mod-source-record-manager-server/src/main/java/org/folio/dao/JobExecutionDaoImpl.java
+++ b/mod-source-record-manager-server/src/main/java/org/folio/dao/JobExecutionDaoImpl.java
@@ -56,6 +56,7 @@ import static java.lang.String.format;
 import static java.util.Objects.nonNull;
 import static org.apache.commons.lang3.StringUtils.EMPTY;
+import static org.folio.dao.IncomingRecordDaoImpl.INCOMING_RECORDS_TABLE;
 import static org.folio.dao.util.JobExecutionDBConstants.COMPLETED_DATE_FIELD;
 import static org.folio.dao.util.JobExecutionDBConstants.CURRENTLY_PROCESSED_FIELD;
 import static org.folio.dao.util.JobExecutionDBConstants.ERROR_STATUS_FIELD;
@@ -597,12 +598,13 @@ public Future<Boolean> hardDeleteJobExecutions(long diffNumberOfDays, String ten
         return Future.succeededFuture();
       }
 
-      UUID[] uuids = jobExecutionIds.stream().map(UUID::fromString).collect(Collectors.toList()).toArray(UUID[]::new);
+      UUID[] uuids = jobExecutionIds.stream().map(UUID::fromString).toList().toArray(UUID[]::new);
       Future<RowSet<Row>> jobExecutionProgressFuture = Future.future(rowSetPromise -> deleteFromRelatedTable(PROGRESS_TABLE_NAME, uuids, sqlConnection, tenantId, rowSetPromise, postgresClient));
       Future<RowSet<Row>> jobExecutionSourceChunksFuture = Future.future(rowSetPromise -> deleteFromRelatedTableWithDeprecatedNaming(JOB_EXECUTION_SOURCE_CHUNKS_TABLE_NAME, uuids, sqlConnection, tenantId, rowSetPromise, postgresClient));
       Future<RowSet<Row>> journalRecordsFuture = Future.future(rowSetPromise -> deleteFromRelatedTable(JOURNAL_RECORDS_TABLE_NAME, uuids, sqlConnection, tenantId, rowSetPromise, postgresClient));
-      return CompositeFuture.all(jobExecutionProgressFuture, jobExecutionSourceChunksFuture, journalRecordsFuture)
+      Future<RowSet<Row>> incomingRecordsFuture = Future.future(rowSetPromise -> deleteFromRelatedTable(INCOMING_RECORDS_TABLE, uuids, sqlConnection, tenantId, rowSetPromise, postgresClient));
+      return CompositeFuture.all(jobExecutionProgressFuture, jobExecutionSourceChunksFuture, journalRecordsFuture, incomingRecordsFuture)
         .compose(ar -> Future.<RowSet<Row>>future(rowSetPromise -> deleteFromJobExecutionTable(uuids, sqlConnection, tenantId, rowSetPromise, postgresClient)))
         .map(true);
     }));

diff --git a/mod-source-record-manager-server/src/main/java/org/folio/dao/JournalRecordDao.java b/mod-source-record-manager-server/src/main/java/org/folio/dao/JournalRecordDao.java
index b4bd5b3c4..072bb74a0 100644
--- a/mod-source-record-manager-server/src/main/java/org/folio/dao/JournalRecordDao.java
+++ b/mod-source-record-manager-server/src/main/java/org/folio/dao/JournalRecordDao.java
@@ -4,9 +4,9 @@
 import io.vertx.sqlclient.Row;
 import io.vertx.sqlclient.RowSet;
 import org.folio.rest.jaxrs.model.JobExecutionSummaryDto;
-import org.folio.rest.jaxrs.model.JobLogEntryDtoCollection;
 import org.folio.rest.jaxrs.model.JournalRecord;
 import org.folio.rest.jaxrs.model.RecordProcessingLogDto;
+import org.folio.rest.jaxrs.model.RecordProcessingLogDtoCollection;
 
 import java.util.List;
 import java.util.Optional;
@@ -55,7 +55,7 @@ public interface JournalRecordDao {
   Future<Boolean> deleteByJobExecutionId(String jobExecutionId, String tenantId);
 
   /**
-   * Searches for JobLogEntryDto entities by jobExecutionId and sorts them using specified sort criteria and direction
+   * Searches for RecordProcessingLogDtoCollection by jobExecutionId and sorts them using specified sort criteria and direction
    *
    * @param jobExecutionId job execution id
    * @param sortBy         sorting criteria
@@ -67,7 +67,7 @@ public interface JournalRecordDao {
    * @param tenantId       tenantId
    * @return future with JobLogEntryDto collection
    */
-  Future<JobLogEntryDtoCollection> getJobLogEntryDtoCollection(String jobExecutionId, String sortBy, String order, boolean errorsOnly, String entityType, int limit, int offset, String tenantId);
+  Future<RecordProcessingLogDtoCollection> getRecordProcessingLogDtoCollection(String jobExecutionId, String sortBy, String order, boolean errorsOnly, String entityType, int limit, int offset, String tenantId);
 
   /**
    * Searches for RecordProcessingLogDto entities by jobExecutionId and recordId
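To make the rename concrete, a caller of the renamed DAO method reads like the sketch below (made-up values; sortBy must be one of the sortable fields the implementation whitelists, such as source_record_order, and the "ALL" entity-type filter is assumed from the endpoint's defaults).

// Hypothetical usage of the renamed method.
journalRecordDao
  .getRecordProcessingLogDtoCollection("5f355b3f-3402-4a06-b070-4d78e1bcfe5f",
    "source_record_order", "asc", false, "ALL", 100, 0, "diku")
  .onSuccess(collection -> collection.getEntries()
    .forEach(entry -> System.out.println(entry.getSourceRecordTitle())))
  .onFailure(t -> System.out.println("rejected sort field or query failure: " + t.getMessage()));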
diff --git a/mod-source-record-manager-server/src/main/java/org/folio/dao/JournalRecordDaoImpl.java b/mod-source-record-manager-server/src/main/java/org/folio/dao/JournalRecordDaoImpl.java
index e52d5cc58..929df5715 100644
--- a/mod-source-record-manager-server/src/main/java/org/folio/dao/JournalRecordDaoImpl.java
+++ b/mod-source-record-manager-server/src/main/java/org/folio/dao/JournalRecordDaoImpl.java
@@ -6,14 +6,12 @@
 import io.vertx.sqlclient.RowSet;
 import io.vertx.sqlclient.SqlResult;
 import io.vertx.sqlclient.Tuple;
-import org.apache.commons.lang3.ArrayUtils;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.folio.dao.util.JournalRecordsColumns;
 import org.folio.dao.util.PostgresClientFactory;
 import org.folio.rest.jaxrs.model.EntityProcessingSummary;
 import org.folio.rest.jaxrs.model.JobExecutionSummaryDto;
-import org.folio.rest.jaxrs.model.JobLogEntryDto;
-import org.folio.rest.jaxrs.model.JobLogEntryDtoCollection;
 import org.folio.rest.jaxrs.model.JournalRecord;
 import org.folio.rest.jaxrs.model.JournalRecord.ActionStatus;
 import org.folio.rest.jaxrs.model.JournalRecord.ActionType;
@@ -22,6 +20,7 @@
 import org.folio.rest.jaxrs.model.ProcessedHoldingsInfo;
 import org.folio.rest.jaxrs.model.ProcessedItemInfo;
 import org.folio.rest.jaxrs.model.RecordProcessingLogDto;
+import org.folio.rest.jaxrs.model.RecordProcessingLogDtoCollection;
 import org.folio.rest.jaxrs.model.RelatedInvoiceLineInfo;
 import org.folio.rest.jaxrs.model.RelatedPoLineInfo;
 import org.springframework.beans.factory.annotation.Autowired;
@@ -38,6 +37,7 @@
 import java.util.Date;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.Optional;
 import java.util.Set;
@@ -45,6 +45,7 @@
 import java.util.stream.Collectors;
 
 import static java.lang.String.format;
+import static java.util.stream.Collectors.toList;
 import static org.apache.commons.lang3.StringUtils.EMPTY;
 import static org.apache.commons.lang3.StringUtils.isEmpty;
 import static org.folio.dao.util.JournalRecordsColumns.ACTION_DATE;
@@ -85,6 +86,11 @@
 import static org.folio.dao.util.JournalRecordsColumns.ITEM_ENTITY_HRID;
 import static org.folio.dao.util.JournalRecordsColumns.ITEM_ENTITY_ID;
 import static org.folio.dao.util.JournalRecordsColumns.JOB_EXECUTION_ID;
+import static org.folio.dao.util.JournalRecordsColumns.ORDER_ID;
+import static org.folio.dao.util.JournalRecordsColumns.PO_LINE_ACTION_STATUS;
+import static org.folio.dao.util.JournalRecordsColumns.PO_LINE_ENTITY_ERROR;
+import static org.folio.dao.util.JournalRecordsColumns.PO_LINE_ENTITY_HRID;
+import static org.folio.dao.util.JournalRecordsColumns.PO_LINE_ENTITY_ID;
 import static org.folio.dao.util.JournalRecordsColumns.SOURCE_ENTITY_ERROR;
 import static org.folio.dao.util.JournalRecordsColumns.SOURCE_ID;
 import static org.folio.dao.util.JournalRecordsColumns.SOURCE_RECORD_ACTION_STATUS;
@@ -122,13 +128,7 @@
 import static org.folio.dao.util.JournalRecordsColumns.TOTAL_UPDATED_ITEMS;
 import static org.folio.dao.util.JournalRecordsColumns.TOTAL_UPDATED_ORDERS;
 import static org.folio.dao.util.JournalRecordsColumns.TOTAL_UPDATED_SOURCE_RECORDS;
-import static org.folio.dao.util.JournalRecordsColumns.ORDER_ID;
-import static org.folio.dao.util.JournalRecordsColumns.PO_LINE_ACTION_STATUS;
-import static org.folio.dao.util.JournalRecordsColumns.PO_LINE_ENTITY_ID;
-import static org.folio.dao.util.JournalRecordsColumns.PO_LINE_ENTITY_HRID;
-import static org.folio.dao.util.JournalRecordsColumns.PO_LINE_ENTITY_ERROR;
 import static org.folio.rest.jaxrs.model.ActionStatus.UPDATED;
-import static org.folio.rest.jaxrs.model.JobLogEntryDto.SourceRecordType.MARC_HOLDINGS;
 import static org.folio.rest.persist.PostgresClient.convertToPsqlStandard;
 
 @Repository
@@ -137,6 +137,8 @@ public class JournalRecordDaoImpl implements JournalRecordDao {
   private static final Logger LOGGER = LogManager.getLogger();
   public static final String SOURCE_RECORD_ENTITY_TYPE = "source_record_entity_type";
   public static final String ORDER_ENTITY_ID = "order_entity_id";
+  public static final String INCOMING_RECORD_ID = "incoming_record_id";
+  public static final String ITEM_HOLDINGS_ID = "item_holdings_id";
   private final Set<String> sortableFields = Set.of("source_record_order", "action_type", "error");
   private final Set<String> jobLogEntrySortableFields = Set.of("source_record_order", "title", "source_record_action_status", "instance_action_status", "holdings_action_status", "item_action_status", "order_action_status", "invoice_action_status", "error");
@@ -176,7 +178,7 @@ public Future<List<RowSet<Row>>> saveBatch(List<JournalRecord> journalRecords, S
     LOGGER.info("saveBatch:: Trying to save list of JournalRecord entities to the {} table", JOURNAL_RECORDS_TABLE);
     Promise<List<RowSet<Row>>> promise = Promise.promise();
     try {
-      List<Tuple> tupleList = journalRecords.stream().map(this::prepareInsertQueryParameters).collect(Collectors.toList());
+      List<Tuple> tupleList = journalRecords.stream().map(this::prepareInsertQueryParameters).collect(toList());
       String query = format(INSERT_SQL, convertToPsqlStandard(tenantId), JOURNAL_RECORDS_TABLE);
       LOGGER.trace("saveBatch:: JournalRecordDaoImpl::saveBatch query = {}; tuples = {}", query, tupleList);
       pgClientFactory.createInstance(tenantId).execute(query, tupleList, promise);
@@ -192,7 +194,7 @@ private Tuple prepareInsertQueryParameters(JournalRecord journalRecord) {
       UUID.fromString(journalRecord.getJobExecutionId()),
       UUID.fromString(journalRecord.getSourceId()),
       journalRecord.getSourceRecordOrder(),
-      journalRecord.getEntityType().toString(),
+      journalRecord.getEntityType() != null ? journalRecord.getEntityType().toString() : EMPTY,
      journalRecord.getEntityId(),
       journalRecord.getEntityHrId() != null ? journalRecord.getEntityHrId() : EMPTY,
       journalRecord.getActionType().toString(),
@@ -239,7 +241,7 @@ public Future<Boolean> deleteByJobExecutionId(String jobExecutionId, String tena
   }
 
   @Override
-  public Future<JobLogEntryDtoCollection> getJobLogEntryDtoCollection(String jobExecutionId, String sortBy, String order, boolean errorsOnly, String entityType, int limit, int offset, String tenantId) {
+  public Future<RecordProcessingLogDtoCollection> getRecordProcessingLogDtoCollection(String jobExecutionId, String sortBy, String order, boolean errorsOnly, String entityType, int limit, int offset, String tenantId) {
     LOGGER.trace("getJobLogEntryDtoCollection:: Trying to get JobLogEntryDtoCollection entity by jobExecutionId = {}", jobExecutionId);
     if (!jobLogEntrySortableFields.contains(sortBy)) {
       return Future.failedFuture(new BadRequestException(format("The specified field for sorting job log entries is invalid: '%s'", sortBy)));
@@ -248,7 +250,7 @@ public Future<JobLogEntryDtoCollection> getJobLogEntryDtoCollection(String jobEx
     String query = format(GET_JOB_LOG_ENTRIES_BY_JOB_EXECUTION_ID_QUERY, jobExecutionId, sortBy, order, limit, offset, errorsOnly, entityType);
     LOGGER.trace("JournalRecordDaoImpl::getJobLogEntryDtoCollection query = {};", query);
     pgClientFactory.createInstance(tenantId).select(query, promise);
-    return promise.future().map(this::mapRowSetToJobLogDtoCollection);
+    return promise.future().map(this::mapRowSetToRecordProcessingLogDtoCollection);
   }
 
   @Override
@@ -316,48 +318,75 @@ private String prepareSortingClause(String sortBy, String order) {
     return format(ORDER_BY_PATTERN, sortBy, order);
   }
 
-  private JobLogEntryDtoCollection mapRowSetToJobLogDtoCollection(RowSet<Row> rowSet) {
-    var jobLogEntryDtoCollection = new JobLogEntryDtoCollection()
-      .withTotalRecords(0);
+  private RecordProcessingLogDtoCollection mapRowSetToRecordProcessingLogDtoCollection(RowSet<Row> rowSet) {
+    var recordProcessingLogDto = new RecordProcessingLogDtoCollection().withTotalRecords(0);
     rowSet.forEach(row ->
-      jobLogEntryDtoCollection
+      recordProcessingLogDto
         .withTotalRecords(row.getInteger(TOTAL_COUNT))
         .getEntries().add(mapJobLogEntryRow(row))
     );
-    return jobLogEntryDtoCollection;
+
+    return processMultipleHoldingsAndItemsIfNeeded(recordProcessingLogDto);
   }
 
-  private JobLogEntryDto mapJobLogEntryRow(Row row) {
-    final var entityType = mapToEntityType(row.getString(SOURCE_RECORD_ENTITY_TYPE));
-    final var entityHrid = row.getArrayOfStrings(HOLDINGS_ENTITY_HRID);
+  private RecordProcessingLogDto mapJobLogEntryRow(Row row) {
+    RecordProcessingLogDto recordProcessingLogSummary = new RecordProcessingLogDto();
+
+    List<ProcessedHoldingsInfo> processedHoldingsInfo = new LinkedList<>();
+    List<ProcessedItemInfo> processedItemInfo = new LinkedList<>();
+
+    final var entityType = mapToRecordProcessingEntityType(row.getString(SOURCE_RECORD_ENTITY_TYPE));
+    final var holdingsEntityHrid = row.getString(HOLDINGS_ENTITY_HRID);
     final var holdingsActionStatus = mapNameToEntityActionStatus(row.getString(HOLDINGS_ACTION_STATUS));
-    return new JobLogEntryDto()
+
+    recordProcessingLogSummary
+      .withSourceRecordType(entityType)
       .withJobExecutionId(row.getValue(JOB_EXECUTION_ID).toString())
+      .withIncomingRecordId(row.getValue(INCOMING_RECORD_ID).toString())
      .withSourceRecordId(row.getValue(SOURCE_ID).toString())
       .withSourceRecordOrder(isEmpty(row.getString(INVOICE_ACTION_STATUS)) ? row.getInteger(SOURCE_RECORD_ORDER).toString() : row.getString(INVOICE_LINE_NUMBER))
-      .withSourceRecordTitle(getJobLogEntryTitle(row.getString(TITLE), entityType, entityHrid, holdingsActionStatus))
-      .withSourceRecordType(entityType)
-      .withHoldingsRecordHridList(ArrayUtils.isEmpty(entityHrid) ? Collections.emptyList() : Arrays.asList(entityHrid))
+      .withSourceRecordTitle(getJobLogEntryTitle(row.getString(TITLE), entityType, holdingsEntityHrid, holdingsActionStatus))
       .withSourceRecordActionStatus(mapNameToEntityActionStatus(row.getString(SOURCE_RECORD_ACTION_STATUS)))
-      .withInstanceActionStatus(mapNameToEntityActionStatus(row.getString(INSTANCE_ACTION_STATUS)))
-      .withHoldingsActionStatus(holdingsActionStatus)
-      .withItemActionStatus(mapNameToEntityActionStatus(row.getString(ITEM_ACTION_STATUS)))
-      .withAuthorityActionStatus(mapNameToEntityActionStatus(row.getString(AUTHORITY_ACTION_STATUS)))
-      .withPoLineActionStatus(mapNameToEntityActionStatus(row.getString(PO_LINE_ACTION_STATUS)))
-      .withInvoiceActionStatus(mapNameToEntityActionStatus(row.getString(INVOICE_ACTION_STATUS)))
+      .withError(row.getString(SOURCE_ENTITY_ERROR))
+      .withRelatedInstanceInfo(constructInstanceProcessingInfo(row))
+      .withRelatedAuthorityInfo(constructProcessedEntityWithSingleIdInfoBasedOnEntityType(row,
+        AUTHORITY_ACTION_STATUS, AUTHORITY_ENTITY_ID, null, AUTHORITY_ENTITY_ERROR))
+      .withRelatedPoLineInfo(new RelatedPoLineInfo()
+        .withActionStatus(mapNameToEntityActionStatus(row.getString(PO_LINE_ACTION_STATUS)))
+        .withIdList(constructSingletonListFromColumn(row, PO_LINE_ENTITY_ID))
+        .withHridList(constructSingletonListFromColumn(row, PO_LINE_ENTITY_HRID))
+        .withError(row.getString(PO_LINE_ENTITY_ERROR))
+        .withOrderId(row.getString(ORDER_ENTITY_ID)))
+      .withRelatedInvoiceInfo(constructProcessedEntityInfoBasedOnEntityType(row,
+        INVOICE_ACTION_STATUS, INVOICE_ENTITY_ID, INVOICE_ENTITY_HRID, INVOICE_ENTITY_ERROR))
+      .withRelatedInvoiceLineInfo(constructInvoiceLineInfo(row))
+      .withSourceRecordTenantId(row.getString(SOURCE_RECORD_TENANT_ID))
       .withInvoiceLineJournalRecordId(Objects.isNull(row.getValue(INVOICE_LINE_JOURNAL_RECORD_ID))
-        ? null : row.getValue(INVOICE_LINE_JOURNAL_RECORD_ID).toString())
-      .withError(row.getString(ERROR));
+        ? null : row.getValue(INVOICE_LINE_JOURNAL_RECORD_ID).toString());
+
+    ProcessedHoldingsInfo processedHoldings = constructProcessedHoldingsInfoBasedOnEntityType(row, HOLDINGS_ACTION_STATUS, HOLDINGS_ENTITY_ID, JournalRecordsColumns.HOLDINGS_ENTITY_HRID, HOLDINGS_PERMANENT_LOCATION_ID, HOLDINGS_ENTITY_ERROR);
+    ProcessedItemInfo processedItem = constructProcessedItemInfoBasedOnEntityType(row, ITEM_ACTION_STATUS, ITEM_ENTITY_ID, ITEM_ENTITY_HRID, ITEM_HOLDINGS_ID, ITEM_ENTITY_ERROR);
+    if (Objects.nonNull(processedHoldings.getActionStatus()) || processedItem.getActionStatus() == UPDATED) {
+      processedHoldingsInfo.add(processedHoldings);
+    }
+    if (Objects.nonNull(processedItem.getActionStatus())) {
+      processedItemInfo.add(processedItem);
+    }
+
+    recordProcessingLogSummary
+      .withRelatedItemInfo(processedItemInfo.stream().distinct().toList())
+      .withRelatedHoldingsInfo(processedHoldingsInfo.stream().distinct().toList());
+
+    return recordProcessingLogSummary;
   }
 
-  private String getJobLogEntryTitle(String title, JobLogEntryDto.SourceRecordType entityType, String[] entityHrid,
+  private String getJobLogEntryTitle(String title, RecordProcessingLogDto.SourceRecordType entityType, String entityHrid,
                                      org.folio.rest.jaxrs.model.ActionStatus holdingsActionStatus) {
-    return MARC_HOLDINGS.equals(entityType)
+    return RecordProcessingLogDto.SourceRecordType.MARC_HOLDINGS.equals(entityType)
       && isActionStatusUpdatedOrCreated(holdingsActionStatus)
-      ? "Holdings " + entityHrid[0]
+      ? "Holdings " + entityHrid
       : title;
   }
 
@@ -378,10 +407,10 @@ private RecordProcessingLogDto mapRowSetToRecordProcessingLogDto(RowSet<Row> res
     recordProcessingLogSummary
       .withJobExecutionId(row.getValue(JOB_EXECUTION_ID).toString())
       .withSourceRecordId(row.getValue(SOURCE_ID).toString())
-      .withSourceRecordOrder(row.getInteger(SOURCE_RECORD_ORDER))
+      .withSourceRecordOrder(row.getInteger(SOURCE_RECORD_ORDER).toString())
       .withSourceRecordTitle(row.getString(TITLE))
       .withSourceRecordActionStatus(mapNameToEntityActionStatus(row.getString(SOURCE_RECORD_ACTION_STATUS)))
-      .withError(row.getString(SOURCE_ENTITY_ERROR))
+      .withError(row.getString(JournalRecordsColumns.SOURCE_ENTITY_ERROR))
       .withSourceRecordTenantId(row.getString(SOURCE_RECORD_TENANT_ID))
       .withRelatedInstanceInfo(constructInstanceProcessingInfo(row))
       .withRelatedAuthorityInfo(constructProcessedEntityWithSingleIdInfoBasedOnEntityType(row,
@@ -398,7 +427,7 @@
     }
 
     resultSet.forEach(r -> {
-      ProcessedHoldingsInfo processedHoldings = constructProcessedHoldingsInfoBasedOnEntityType(r, HOLDINGS_ACTION_STATUS, HOLDINGS_ENTITY_ID, HOLDINGS_ENTITY_HRID, HOLDINGS_PERMANENT_LOCATION_ID, HOLDINGS_ENTITY_ERROR);
+      ProcessedHoldingsInfo processedHoldings = constructProcessedHoldingsInfoBasedOnEntityType(r, HOLDINGS_ACTION_STATUS, HOLDINGS_ENTITY_ID, JournalRecordsColumns.HOLDINGS_ENTITY_HRID, HOLDINGS_PERMANENT_LOCATION_ID, HOLDINGS_ENTITY_ERROR);
       ProcessedItemInfo processedItem = constructProcessedItemInfoBasedOnEntityType(r, ITEM_ACTION_STATUS, ITEM_ENTITY_ID, ITEM_ENTITY_HRID, HOLDINGS_ENTITY_ID, ITEM_ENTITY_ERROR);
       if (Objects.nonNull(processedHoldings.getActionStatus()) || processedItem.getActionStatus() == UPDATED) {
         processedHoldingsInfo.add(processedHoldings);
@@ -474,8 +503,8 @@ private org.folio.rest.jaxrs.model.ActionStatus mapNameToEntityActionStatus(Stri
     return name == null ? null : org.folio.rest.jaxrs.model.ActionStatus.fromValue(name);
   }
 
-  private JobLogEntryDto.SourceRecordType mapToEntityType(String entityType) {
-    return entityType == null ? null : JobLogEntryDto.SourceRecordType.fromValue(entityType);
+  private RecordProcessingLogDto.SourceRecordType mapToRecordProcessingEntityType(String entityType) {
+    return entityType == null ? null : RecordProcessingLogDto.SourceRecordType.fromValue(entityType);
   }
 
   private JobExecutionSummaryDto mapRowToJobExecutionSummaryDto(Row row) {
@@ -515,4 +544,54 @@ private EntityProcessingSummary mapToEntityProcessingSummary(Row row, String tot
       .withTotalDiscardedEntities(totalDiscarded)
       .withTotalErrors(totalErrors);
   }
+
+  private static RecordProcessingLogDtoCollection processMultipleHoldingsAndItemsIfNeeded(RecordProcessingLogDtoCollection recordProcessingLogDto) {
+    List<RecordProcessingLogDto> entries = recordProcessingLogDto.getEntries();
+    if (!ifNeedToMerge(entries)) {
+      return recordProcessingLogDto;
+    }
+    Map<String, List<ProcessedHoldingsInfo>> relatedHoldingsInfoBySourceRecordId =
+      entries.stream()
+        .collect(Collectors.groupingBy(
+          RecordProcessingLogDto::getSourceRecordId,
+          Collectors.mapping(RecordProcessingLogDto::getRelatedHoldingsInfo,
+            Collectors.flatMapping(List::stream, toList())
+          )));
+
+    Map<String, List<ProcessedItemInfo>> relatedItemInfoBySourceId =
+      entries.stream()
+        .collect(Collectors.groupingBy(
+          RecordProcessingLogDto::getSourceRecordId,
+          Collectors.mapping(RecordProcessingLogDto::getRelatedItemInfo,
+            Collectors.flatMapping(List::stream, toList())
+          )));
+
+    List<RecordProcessingLogDto> mergedEntries = relatedHoldingsInfoBySourceRecordId.entrySet()
+      .stream().map(e -> {
+        String sourceRecordId = e.getKey();
+        List<ProcessedItemInfo> relatedItemInfos = relatedItemInfoBySourceId.get(sourceRecordId);
+
+        RecordProcessingLogDto firstRecordWithCurrentSourceId = entries.stream()
+          .filter(record -> record.getSourceRecordId().equals(sourceRecordId))
+          .findFirst().orElseGet(RecordProcessingLogDto::new);
+
+        return firstRecordWithCurrentSourceId
+          .withRelatedHoldingsInfo(e.getValue().stream().distinct().toList())
+          .withRelatedItemInfo(relatedItemInfos.stream().distinct().toList());
+      }).collect(toList());
+    return recordProcessingLogDto.withEntries(mergedEntries);
+  }
+
+  private static boolean ifNeedToMerge(List<RecordProcessingLogDto> entries) {
+    Map<String, Long> sourceRecordIdCounts = entries.stream()
+      .filter(e -> e.getRelatedHoldingsInfo() != null && !e.getRelatedHoldingsInfo().isEmpty())
+      .collect(Collectors.groupingBy(RecordProcessingLogDto::getSourceRecordId, Collectors.counting()));
+
+    Map<String, Long> sourceItemRecordIdCounts = entries.stream()
+      .filter(e -> e.getRelatedItemInfo() != null && !e.getRelatedItemInfo().isEmpty())
+      .collect(Collectors.groupingBy(RecordProcessingLogDto::getSourceRecordId, Collectors.counting()));
+
+    return sourceRecordIdCounts.values().stream().anyMatch(count -> count > 1) ||
+      sourceItemRecordIdCounts.values().stream().anyMatch(count -> count > 1);
+  }
 }
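The merge step above exists because the log query can now emit one row per holdings/item pair for a single source record, so entries sharing a sourceRecordId must be collapsed into one entry carrying all related info. A sketch of the intended behaviour (hypothetical ids; withId setters are assumed from the generated DTOs, and since processMultipleHoldingsAndItemsIfNeeded is private, treat this as a sketch for a test in the same class):

RecordProcessingLogDto first = new RecordProcessingLogDto()
  .withSourceRecordId("c0ffee00-0000-0000-0000-000000000001")
  .withRelatedHoldingsInfo(List.of(new ProcessedHoldingsInfo().withId("holdings-1")))
  .withRelatedItemInfo(List.of(new ProcessedItemInfo().withId("item-1")));
RecordProcessingLogDto second = new RecordProcessingLogDto()
  .withSourceRecordId("c0ffee00-0000-0000-0000-000000000001")
  .withRelatedHoldingsInfo(List.of(new ProcessedHoldingsInfo().withId("holdings-2")))
  .withRelatedItemInfo(List.of(new ProcessedItemInfo().withId("item-2")));

RecordProcessingLogDtoCollection merged = processMultipleHoldingsAndItemsIfNeeded(
  new RecordProcessingLogDtoCollection().withEntries(List.of(first, second)).withTotalRecords(2));
// merged now holds a single entry for that sourceRecordId whose relatedHoldingsInfo
// contains holdings-1 and holdings-2, and whose relatedItemInfo contains item-1 and item-2.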
diff --git a/mod-source-record-manager-server/src/main/java/org/folio/rest/impl/MetadataProviderImpl.java b/mod-source-record-manager-server/src/main/java/org/folio/rest/impl/MetadataProviderImpl.java
index bd3473605..edcc865a1 100644
--- a/mod-source-record-manager-server/src/main/java/org/folio/rest/impl/MetadataProviderImpl.java
+++ b/mod-source-record-manager-server/src/main/java/org/folio/rest/impl/MetadataProviderImpl.java
@@ -19,6 +19,7 @@
 import org.folio.rest.jaxrs.model.MetadataProviderJobLogEntriesJobExecutionIdGetEntityType;
 import org.folio.rest.jaxrs.resource.MetadataProvider;
 import org.folio.rest.tools.utils.TenantTool;
+import org.folio.services.IncomingRecordService;
 import org.folio.services.JobExecutionService;
 import org.folio.services.JournalRecordService;
 import org.folio.spring.SpringContextUtil;
@@ -51,6 +52,8 @@
   private JobExecutionService jobExecutionService;
   @Autowired
   private JournalRecordService journalRecordService;
+  @Autowired
+  private IncomingRecordService incomingRecordService;
   private String tenantId;
 
   public MetadataProviderImpl(Vertx vertx, String tenantId) { //NOSONAR
@@ -111,7 +114,7 @@ public void getMetadataProviderJobLogEntriesByJobExecutionId(String jobExecution
     try {
       LOGGER.debug("getMetadataProviderJobLogEntriesByJobExecutionId:: jobExecutionId {}, sortBy {}, errorsOnly {}, entityType {}",
         jobExecutionId, sortBy, errorsOnly, entityType.name());
-      journalRecordService.getJobLogEntryDtoCollection(jobExecutionId, sortBy, order.name(), errorsOnly, entityType.name(), limit, offset, tenantId)
+      journalRecordService.getRecordProcessingLogDtoCollection(jobExecutionId, sortBy, order.name(), errorsOnly, entityType.name(), limit, offset, tenantId)
         .map(GetMetadataProviderJobLogEntriesByJobExecutionIdResponse::respond200WithApplicationJson)
         .map(Response.class::cast)
         .otherwise(ExceptionHelper::mapExceptionToResponse)
@@ -197,6 +200,26 @@
     });
   }
 
+  @Override
+  public void getMetadataProviderIncomingRecordsByRecordId(String recordId, Map<String, String> okapiHeaders, Handler<AsyncResult<Response>> asyncResultHandler, Context vertxContext) {
+    vertxContext.runOnContext(v -> {
+      try {
+        LOGGER.debug("getMetadataProviderIncomingRecordsByRecordId:: tenantId {}", tenantId);
+        incomingRecordService.getById(recordId, tenantId)
+          .map(incomingRecordOptional -> incomingRecordOptional
+            .map(GetMetadataProviderIncomingRecordsByRecordIdResponse::respond200WithApplicationJson)
+            .orElseGet(() -> GetMetadataProviderIncomingRecordsByRecordIdResponse
+              .respond404WithTextPlain(format("IncomingRecord by id: '%s' was not found", recordId))))
+          .map(Response.class::cast)
+          .otherwise(ExceptionHelper::mapExceptionToResponse)
+          .onComplete(asyncResultHandler);
+      } catch (Exception e) {
+        LOGGER.warn("getMetadataProviderIncomingRecordsByRecordId:: Failed to retrieve IncomingRecord by id", e);
+        asyncResultHandler.handle(Future.succeededFuture(ExceptionHelper.mapExceptionToResponse(e)));
+      }
+    });
+  }
+
   private JobExecutionFilter buildJobExecutionFilter(List<JobExecution.Status> statusAny, List<String> profileIdNotAny, String statusNot,
                                                      List<JobExecution.UiStatus> uiStatusAny, String hrIdPattern, String fileNamePattern,
                                                      List<String> fileNameNotAny, List<String> profileIdAny, List<String> subordinationTypeNotAny,
diff --git a/mod-source-record-manager-server/src/main/java/org/folio/services/ChangeEngineServiceImpl.java b/mod-source-record-manager-server/src/main/java/org/folio/services/ChangeEngineServiceImpl.java
index 8b2e9d4b9..592c4d655 100644
--- a/mod-source-record-manager-server/src/main/java/org/folio/services/ChangeEngineServiceImpl.java
+++ b/mod-source-record-manager-server/src/main/java/org/folio/services/ChangeEngineServiceImpl.java
@@ -6,6 +6,8 @@
 import static org.apache.commons.lang3.StringUtils.isNotBlank;
 import static org.folio.rest.RestVerticle.MODULE_SPECIFIC_ARGS;
 import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_ERROR;
+import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_INCOMING_EDIFACT_RECORD_PARSED;
+import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_INCOMING_MARC_BIB_RECORD_PARSED;
 import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_MARC_FOR_DELETE_RECEIVED;
 import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_MARC_FOR_UPDATE_RECEIVED;
 import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_RAW_RECORDS_CHUNK_PARSED;
@@ -51,6 +53,7 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.folio.MappingProfile;
+import org.folio.services.journal.JournalUtil;
 import org.folio.dao.JobExecutionSourceChunkDao;
 import org.folio.dataimport.util.OkapiConnectionParams;
 import org.folio.dataimport.util.marc.MarcRecordAnalyzer;
@@ -63,6 +66,7 @@
 import org.folio.rest.jaxrs.model.ActionProfile.Action;
 import org.folio.rest.jaxrs.model.ActionProfile.FolioRecord;
 import org.folio.rest.jaxrs.model.DataImportEventPayload;
+import org.folio.rest.jaxrs.model.DataImportEventTypes;
 import org.folio.rest.jaxrs.model.EntityType;
 import org.folio.rest.jaxrs.model.ErrorRecord;
 import org.folio.rest.jaxrs.model.ExternalIdsHolder;
@@ -121,6 +125,8 @@ public class ChangeEngineServiceImpl implements ChangeEngineService {
   private final JobProfileSnapshotValidationService jobProfileSnapshotValidationService;
   private final KafkaConfig kafkaConfig;
   private final FieldModificationService fieldModificationService;
+  private final IncomingRecordService incomingRecordService;
+  private final JournalRecordService journalRecordService;
 
   @Value("${srm.kafka.RawChunksKafkaHandler.maxDistributionNum:100}")
   private int maxDistributionNum;
@@ -136,7 +142,9 @@ public ChangeEngineServiceImpl(@Autowired JobExecutionSourceChunkDao jobExecutio
                                  @Autowired MappingMetadataService mappingMetadataService,
                                  @Autowired JobProfileSnapshotValidationService jobProfileSnapshotValidationService,
                                  @Autowired KafkaConfig kafkaConfig,
-                                 @Autowired FieldModificationService fieldModificationService) {
+                                 @Autowired FieldModificationService fieldModificationService,
+                                 @Autowired IncomingRecordService incomingRecordService,
+                                 @Autowired JournalRecordService journalRecordService) {
     this.jobExecutionSourceChunkDao = jobExecutionSourceChunkDao;
     this.jobExecutionService = jobExecutionService;
     this.marcRecordAnalyzer = marcRecordAnalyzer;
@@ -146,6 +154,8 @@
     this.jobProfileSnapshotValidationService = jobProfileSnapshotValidationService;
     this.kafkaConfig = kafkaConfig;
     this.fieldModificationService = fieldModificationService;
+    this.incomingRecordService = incomingRecordService;
+    this.journalRecordService = journalRecordService;
   }
 
   @Override
@@ -157,50 +167,18 @@ public Future<List<Record>> parseRawRecordsChunkForJobExecution(RawRecordsDto ch
       params.getTenantId(), acceptInstanceId, params);
 
     futureParsedRecords
-      .compose(parsedRecords -> isJobProfileCompatibleWithRecordsType(jobExecution.getJobProfileSnapshotWrapper(), parsedRecords)
-        ? Future.succeededFuture(parsedRecords)
-        : Future.failedFuture(prepareWrongJobProfileErrorMessage(jobExecution, parsedRecords)))
+      .compose(parsedRecords -> {
+        saveIncomingAndJournalRecords(parsedRecords, params.getTenantId());
+
+        return isJobProfileCompatibleWithRecordsType(jobExecution.getJobProfileSnapshotWrapper(), parsedRecords)
+          ? Future.succeededFuture(parsedRecords)
+          : Future.failedFuture(prepareWrongJobProfileErrorMessage(jobExecution, parsedRecords));
+      })
       .compose(parsedRecords -> ensureMappingMetaDataSnapshot(jobExecution.getId(), parsedRecords, params)
         .map(parsedRecords))
       .onSuccess(parsedRecords -> {
         fillParsedRecordsWithAdditionalFields(parsedRecords);
-
-        if (updateMarcActionExists(jobExecution) || updateInstanceActionExists(jobExecution)
-          || isCreateOrUpdateItemOrHoldingsActionExists(jobExecution, parsedRecords) || isMarcAuthorityMatchProfile(jobExecution)) {
-          hrIdFieldService.move001valueTo035Field(parsedRecords);
-          updateRecords(parsedRecords, jobExecution, params)
-            .onSuccess(ar -> promise.complete(parsedRecords))
-            .onFailure(promise::fail);
-        } else if (deleteMarcActionExists(jobExecution)) {
-          deleteRecords(parsedRecords, jobExecution, params)
-            .onSuccess(ar -> promise.complete(parsedRecords))
-            .onFailure(promise::fail);
-        } else {
-          saveRecords(params, jobExecution, parsedRecords)
-            .onComplete(postAr -> {
-              if (postAr.failed()) {
-                StatusDto statusDto = new StatusDto()
-                  .withStatus(StatusDto.Status.ERROR)
-                  .withErrorStatus(StatusDto.ErrorStatus.RECORD_UPDATE_ERROR);
-                jobExecutionService.updateJobExecutionStatus(jobExecution.getId(), statusDto, params)
-                  .onComplete(r -> {
-                    if (r.failed()) {
-                      LOGGER.warn("parseRawRecordsChunkForJobExecution:: Error during update jobExecution and snapshot status", r.cause());
-                    }
-                  });
-                jobExecutionSourceChunkDao.getById(sourceChunkId, params.getTenantId())
-                  .compose(optional -> optional
-                    .map(sourceChunk -> jobExecutionSourceChunkDao
-                      .update(sourceChunk.withState(JobExecutionSourceChunk.State.ERROR), params.getTenantId()))
-                    .orElseThrow(() -> new NotFoundException(String.format(
-                      "Couldn't update failed jobExecutionSourceChunk status to ERROR, jobExecutionSourceChunk with id %s was not found",
-                      sourceChunkId))))
-                  .onComplete(ar -> promise.fail(postAr.cause()));
-              } else {
-                promise.complete(parsedRecords);
-              }
-            });
-        }
+        processRecords(parsedRecords, jobExecution, params, sourceChunkId, promise);
       }).onFailure(th -> {
         LOGGER.warn("parseRawRecordsChunkForJobExecution:: Error parsing records: {}", th.getMessage());
         promise.fail(th);
       });
@@ -208,6 +186,94 @@
     return promise.future();
   }
 
+  private void processRecords(List<Record> parsedRecords, JobExecution jobExecution, OkapiConnectionParams params,
+                              String sourceChunkId, Promise<List<Record>> promise) {
+    switch (getAction(parsedRecords, jobExecution)) {
+      case UPDATE_RECORD -> {
+        hrIdFieldService.move001valueTo035Field(parsedRecords);
+        updateRecords(parsedRecords, jobExecution, params)
+          .onSuccess(ar -> promise.complete(parsedRecords)).onFailure(promise::fail);
+      }
+      case DELETE_RECORD -> deleteRecords(parsedRecords, jobExecution, params)
+        .onSuccess(ar -> promise.complete(parsedRecords)).onFailure(promise::fail);
+      case SEND_ERROR -> sendEvents(parsedRecords, jobExecution, params, DI_ERROR)
+        .onSuccess(ar -> promise.complete(parsedRecords)).onFailure(promise::fail);
+      case SEND_MARC_BIB -> sendEvents(parsedRecords, jobExecution, params, DI_INCOMING_MARC_BIB_RECORD_PARSED)
+        .onSuccess(ar -> promise.complete(parsedRecords)).onFailure(promise::fail);
+      case SEND_EDIFACT -> sendEvents(parsedRecords, jobExecution, params, DI_INCOMING_EDIFACT_RECORD_PARSED)
+        .onSuccess(ar -> promise.complete(parsedRecords)).onFailure(promise::fail);
+      default -> saveRecords(jobExecution, sourceChunkId, params, parsedRecords, promise);
+    }
+  }
+
+  private ActionType getAction(List<Record> parsedRecords, JobExecution jobExecution) {
+    if (updateMarcActionExists(jobExecution) || updateInstanceActionExists(jobExecution)
+      || isCreateOrUpdateItemOrHoldingsActionExists(jobExecution, parsedRecords) || isMarcAuthorityMatchProfile(jobExecution)) {
+      return ActionType.UPDATE_RECORD;
+    }
+    if (deleteMarcActionExists(jobExecution)) {
+      return ActionType.DELETE_RECORD;
+    }
+    if (parsedRecords.isEmpty()) {
+      return ActionType.SAVE_RECORD;
+    }
+    RecordType recordType = parsedRecords.get(0).getRecordType();
+    if (recordType == RecordType.MARC_BIB) {
+      return ActionType.SEND_MARC_BIB;
+    }
+    if (recordType == RecordType.EDIFACT) {
+      return ActionType.SEND_EDIFACT;
+    }
+    if (recordType == null) {
+      return ActionType.SEND_ERROR;
+    }
+    return ActionType.SAVE_RECORD;
+  }
+
+  private enum ActionType {
+    UPDATE_RECORD, DELETE_RECORD, SEND_ERROR, SEND_MARC_BIB, SEND_EDIFACT, SAVE_RECORD
+  }
+
+  private void saveRecords(JobExecution jobExecution, String sourceChunkId, OkapiConnectionParams params, List<Record> parsedRecords, Promise<List<Record>> promise) {
+    saveRecords(params, jobExecution, parsedRecords)
+      .onComplete(postAr -> {
+        if (postAr.failed()) {
+          StatusDto statusDto = new StatusDto()
+            .withStatus(StatusDto.Status.ERROR)
+            .withErrorStatus(StatusDto.ErrorStatus.RECORD_UPDATE_ERROR);
+          jobExecutionService.updateJobExecutionStatus(jobExecution.getId(), statusDto, params)
+            .onComplete(r -> {
+              if (r.failed()) {
+                LOGGER.warn("parseRawRecordsChunkForJobExecution:: Error during update jobExecution with id '{}' and snapshot status",
+                  jobExecution.getId(), r.cause());
+              }
+            });
+          jobExecutionSourceChunkDao.getById(sourceChunkId, params.getTenantId())
+            .compose(optional -> optional
+              .map(sourceChunk -> jobExecutionSourceChunkDao
+                .update(sourceChunk.withState(JobExecutionSourceChunk.State.ERROR), params.getTenantId()))
+              .orElseThrow(() -> new NotFoundException(String.format(
+                "Couldn't update failed jobExecutionSourceChunk status to ERROR, jobExecutionSourceChunk with id %s was not found, jobExecutionId: %s",
+                sourceChunkId, jobExecution.getId())))) 
+            .onComplete(ar -> promise.fail(postAr.cause()));
+        } else {
+          promise.complete(parsedRecords);
+        }
+      });
+  }
+
+  private Future<Boolean> sendEvents(List<Record> records, JobExecution jobExecution, OkapiConnectionParams params, DataImportEventTypes eventType) {
+    LOGGER.info("sendEvents:: Sending events with type: {}, jobExecutionId: {}", eventType.value(), jobExecution.getId());
+    return recordsPublishingService.sendEventsWithRecords(records, jobExecution.getId(), params, eventType.value());
+  }
+
+  private void saveIncomingAndJournalRecords(List<Record> parsedRecords, String tenantId) {
+    if (!parsedRecords.isEmpty()) {
+      incomingRecordService.saveBatch(JournalUtil.buildIncomingRecordsByRecords(parsedRecords), tenantId);
+      journalRecordService.saveBatch(JournalUtil.buildJournalRecordsByRecords(parsedRecords), tenantId);
+    }
+  }
+
   /**
    * Checks whether job profile snapshot is compatible with record type of the specified {@code records}.
    * Returns {@code true} for the specified records that have not been parsed successfully and therefore
@@ -741,6 +807,7 @@ private Future<List<Record>> saveRecords(OkapiConnectionParams params, JobExecut
     if (CollectionUtils.isEmpty(parsedRecords)) {
       return Future.succeededFuture();
     }
+    LOGGER.info("saveRecords:: Saving records in SRS, amount: {}, jobExecutionId: {}", parsedRecords.size(), jobExecution.getId());
     RecordCollection recordCollection = new RecordCollection()
       .withRecords(parsedRecords)
       .withTotalRecords(parsedRecords.size());
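The net effect of the dispatch above: update/delete flows keep their old paths; MARC bib and EDIFACT records are no longer persisted to SRS up front but only published to Kafka; unparseable records (null record type) go straight to DI_ERROR; everything else still falls through to saveRecords. A compact sketch of the SEND_* mapping (it mirrors the branches above; illustrative, not part of the patch):

// Sketch: which event a record type maps to before any profile-driven branching.
static DataImportEventTypes eventFor(Record.RecordType recordType) {
  if (recordType == null) {
    return DI_ERROR; // parsing failed, no type could be determined
  }
  return switch (recordType) {
    case MARC_BIB -> DI_INCOMING_MARC_BIB_RECORD_PARSED;
    case EDIFACT -> DI_INCOMING_EDIFACT_RECORD_PARSED;
    default -> null; // other types fall through to saveRecords(...)
  };
}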
diff --git a/mod-source-record-manager-server/src/main/java/org/folio/services/IncomingRecordService.java b/mod-source-record-manager-server/src/main/java/org/folio/services/IncomingRecordService.java
new file mode 100644
index 000000000..e4900251c
--- /dev/null
+++ b/mod-source-record-manager-server/src/main/java/org/folio/services/IncomingRecordService.java
@@ -0,0 +1,29 @@
+package org.folio.services;
+
+import io.vertx.core.Future;
+import org.folio.rest.jaxrs.model.IncomingRecord;
+
+import java.util.List;
+import java.util.Optional;
+
+/**
+ * {@link IncomingRecord} Service interface
+ */
+public interface IncomingRecordService {
+
+  /**
+   * Searches for {@link IncomingRecord} by id
+   *
+   * @param id       incomingRecord id
+   * @param tenantId tenant id
+   * @return future with optional incomingRecord
+   */
+  Future<Optional<IncomingRecord>> getById(String id, String tenantId);
+
+  /**
+   * Saves {@link IncomingRecord}s into DB
+   *
+   * @param incomingRecords incoming records to be saved
+   * @param tenantId        tenant id
+   */
+  void saveBatch(List<IncomingRecord> incomingRecords, String tenantId);
+}

diff --git a/mod-source-record-manager-server/src/main/java/org/folio/services/IncomingRecordServiceImpl.java b/mod-source-record-manager-server/src/main/java/org/folio/services/IncomingRecordServiceImpl.java
new file mode 100644
index 000000000..ebf60452f
--- /dev/null
+++ b/mod-source-record-manager-server/src/main/java/org/folio/services/IncomingRecordServiceImpl.java
@@ -0,0 +1,27 @@
+package org.folio.services;
+
+import io.vertx.core.Future;
+import org.folio.dao.IncomingRecordDao;
+import org.folio.rest.jaxrs.model.IncomingRecord;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+
+import java.util.List;
+import java.util.Optional;
+
+@Service
+public class IncomingRecordServiceImpl implements IncomingRecordService {
+
+  @Autowired
+  private IncomingRecordDao incomingRecordDao;
+
+  @Override
+  public Future<Optional<IncomingRecord>> getById(String id, String tenantId) {
+    return incomingRecordDao.getById(id, tenantId);
+  }
+
+  @Override
+  public void saveBatch(List<IncomingRecord> incomingRecords, String tenantId) {
+    incomingRecordDao.saveBatch(incomingRecords, tenantId);
+  }
+}
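A design note worth flagging in review: saveBatch returns void at the service layer, so the underlying DAO future is fire-and-forget and a failed insert only surfaces through the DAO's warn log. If a caller ever needed to await the write, the signature would have to propagate the future, roughly as below (a sketch, not the shipped API):

// Hypothetical alternative that propagates completion to the caller.
public Future<List<RowSet<Row>>> saveBatchAwaitable(List<IncomingRecord> incomingRecords, String tenantId) {
  return incomingRecordDao.saveBatch(incomingRecords, tenantId);
}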
diff --git a/mod-source-record-manager-server/src/main/java/org/folio/services/JournalRecordService.java b/mod-source-record-manager-server/src/main/java/org/folio/services/JournalRecordService.java
index d17c65c38..58d324e5d 100644
--- a/mod-source-record-manager-server/src/main/java/org/folio/services/JournalRecordService.java
+++ b/mod-source-record-manager-server/src/main/java/org/folio/services/JournalRecordService.java
@@ -2,10 +2,12 @@
 import io.vertx.core.Future;
 import org.folio.rest.jaxrs.model.JobExecutionSummaryDto;
-import org.folio.rest.jaxrs.model.JobLogEntryDtoCollection;
+import org.folio.rest.jaxrs.model.JournalRecord;
 import org.folio.rest.jaxrs.model.JournalRecordCollection;
 import org.folio.rest.jaxrs.model.RecordProcessingLogDto;
+import org.folio.rest.jaxrs.model.RecordProcessingLogDtoCollection;
 
+import java.util.List;
 import java.util.Optional;
 
 /**
@@ -40,13 +42,13 @@
    * @param sortBy     sorting criteria
    * @param order      sorting direction
    * @param errorsOnly filtering by error field
-   * @param errorsOnly filtering by entity type
+   * @param entityType filtering by entity type
    * @param limit      limit
    * @param offset     offset
    * @param tenantId   tenantId
    * @return future with JobLogEntryDto collection
    */
-  Future<JobLogEntryDtoCollection> getJobLogEntryDtoCollection(String jobExecutionId, String sortBy, String order, boolean errorsOnly, String entityType, int limit, int offset, String tenantId);
+  Future<RecordProcessingLogDtoCollection> getRecordProcessingLogDtoCollection(String jobExecutionId, String sortBy, String order, boolean errorsOnly, String entityType, int limit, int offset, String tenantId);
 
   /**
    * Searches for RecordProcessingLogDto entity by jobExecutionId and recordId
@@ -75,4 +77,12 @@
    * @return Future with JournalRecords updated number
    */
   Future<Integer> updateErrorJournalRecordsByOrderIdAndJobExecution(String jobExecutionId, String orderId, String error, String tenantId);
+
+  /**
+   * Saves set of {@link JournalRecord} entities
+   *
+   * @param journalRecords journal records to save
+   * @param tenantId       tenant id
+   */
+  void saveBatch(List<JournalRecord> journalRecords, String tenantId);
 }

diff --git a/mod-source-record-manager-server/src/main/java/org/folio/services/JournalRecordServiceImpl.java b/mod-source-record-manager-server/src/main/java/org/folio/services/JournalRecordServiceImpl.java
index 6b9c74c3a..d0bf68848 100644
--- a/mod-source-record-manager-server/src/main/java/org/folio/services/JournalRecordServiceImpl.java
+++ b/mod-source-record-manager-server/src/main/java/org/folio/services/JournalRecordServiceImpl.java
@@ -1,15 +1,16 @@
 package org.folio.services;
 
 import io.vertx.core.Future;
-import io.vertx.core.json.JsonObject;
 import org.folio.dao.JournalRecordDao;
 import org.folio.rest.jaxrs.model.JobExecutionSummaryDto;
-import org.folio.rest.jaxrs.model.JobLogEntryDtoCollection;
+import org.folio.rest.jaxrs.model.JournalRecord;
 import org.folio.rest.jaxrs.model.JournalRecordCollection;
 import org.folio.rest.jaxrs.model.RecordProcessingLogDto;
+import org.folio.rest.jaxrs.model.RecordProcessingLogDtoCollection;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.stereotype.Service;
 
+import java.util.List;
 import java.util.Optional;
 
 /**
@@ -37,8 +38,8 @@ public Future<JournalRecordCollection> getJobExecutionJournalRecords(String jobE
   }
 
   @Override
-  public Future<JobLogEntryDtoCollection> getJobLogEntryDtoCollection(String jobExecutionId, String sortBy, String order, boolean errorsOnly, String entityType, int limit, int offset, String tenantId) {
-    return journalRecordDao.getJobLogEntryDtoCollection(jobExecutionId, sortBy, order, errorsOnly, entityType, limit, offset, tenantId);
+  public Future<RecordProcessingLogDtoCollection> getRecordProcessingLogDtoCollection(String jobExecutionId, String sortBy, String order, boolean errorsOnly, String entityType, int limit, int offset, String tenantId) {
+    return journalRecordDao.getRecordProcessingLogDtoCollection(jobExecutionId, sortBy, order, errorsOnly, entityType, limit, offset, tenantId);
   }
 
   @Override
@@ -55,4 +56,9 @@ public Future<Optional<JobExecutionSummaryDto>> getJobExecutionSummaryDto(String
   public Future<Integer> updateErrorJournalRecordsByOrderIdAndJobExecution(String jobExecutionId, String orderId, String error, String tenantId) {
     return journalRecordDao.updateErrorJournalRecordsByOrderIdAndJobExecution(jobExecutionId, orderId, error, tenantId);
   }
+
+  @Override
+  public void saveBatch(List<JournalRecord> journalRecords, String tenantId) {
+    journalRecordDao.saveBatch(journalRecords, tenantId);
+  }
 }
diff --git a/mod-source-record-manager-server/src/main/java/org/folio/services/RecordsPublishingServiceImpl.java b/mod-source-record-manager-server/src/main/java/org/folio/services/RecordsPublishingServiceImpl.java
index 6ffa06fe1..326254bb7 100644
--- a/mod-source-record-manager-server/src/main/java/org/folio/services/RecordsPublishingServiceImpl.java
+++ b/mod-source-record-manager-server/src/main/java/org/folio/services/RecordsPublishingServiceImpl.java
@@ -19,6 +19,7 @@
 import org.folio.services.exceptions.RawChunkRecordsParsingException;
 import org.folio.services.exceptions.RecordsPublishingException;
 import org.folio.services.util.EventHandlingUtil;
+import org.folio.services.util.RecordConversionUtil;
 import org.folio.verticle.consumers.errorhandlers.payloadbuilders.DiErrorPayloadBuilder;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.beans.factory.annotation.Value;
@@ -41,24 +42,24 @@ public class RecordsPublishingServiceImpl implements RecordsPublishingService {
   public static final String RECORD_ID_HEADER = "recordId";
   public static final String USER_ID_HEADER = "userId";
   private static final AtomicInteger indexer = new AtomicInteger();
-  public static final String ERROR_KEY = "ERROR";
+
+  @Value("${srm.kafka.CreatedRecordsKafkaHandler.maxDistributionNum:100}")
+  private int maxDistributionNum;
+
+  private JobExecutionService jobExecutionService;
   private DataImportPayloadContextBuilder payloadContextBuilder;
   private KafkaConfig kafkaConfig;
-
-  @Value("${srm.kafka.CreatedRecordsKafkaHandler.maxDistributionNum:100}")
-  private int maxDistributionNum;
-
-  @Autowired
   private List<DiErrorPayloadBuilder> errorPayloadBuilders;
 
   public RecordsPublishingServiceImpl(@Autowired JobExecutionService jobExecutionService,
                                       @Autowired DataImportPayloadContextBuilder payloadContextBuilder,
-                                      @Autowired KafkaConfig kafkaConfig) {
+                                      @Autowired KafkaConfig kafkaConfig,
+                                      @Autowired List<DiErrorPayloadBuilder> errorPayloadBuilders) {
     this.jobExecutionService = jobExecutionService;
     this.payloadContextBuilder = payloadContextBuilder;
     this.kafkaConfig = kafkaConfig;
+    this.errorPayloadBuilders = errorPayloadBuilders;
   }
 
   @Override
@@ -82,15 +83,17 @@ private Future<Boolean> sendRecords(List<Record> createdRecords, JobExecution jo
     for (Record record : createdRecords) {
       String key = String.valueOf(indexer.incrementAndGet() % maxDistributionNum);
       try {
-        if (isParsedContentExists(record)) {
+        if (record.getRecordType() != null && isParsedContentExists(record)) {
           DataImportEventPayload payload = prepareEventPayload(record, profileSnapshotWrapper, params, eventType);
           params.getHeaders().set(RECORD_ID_HEADER, record.getId());
           params.getHeaders().set(USER_ID_HEADER, jobExecution.getUserId());
           futures.add(sendEventToKafka(params.getTenantId(), Json.encode(payload), eventType,
             KafkaHeaderUtils.kafkaHeadersFromMultiMap(params.getHeaders()), kafkaConfig, key));
-        }
-        else {
-          futures.add(sendDiErrorEvent(new RawChunkRecordsParsingException(record.getErrorRecord().getDescription()),
+        } else {
+          String cause = record.getErrorRecord() == null
format("Cannot send event for individual record with recordType: %s", record.getRecordType()) + : record.getErrorRecord().getDescription(); + futures.add(sendDiErrorEvent(new RawChunkRecordsParsingException(cause), params, jobExecution.getId(), params.getTenantId(), record)); } } catch (Exception e) { @@ -151,11 +154,8 @@ private DataImportEventPayload prepareEventPayload(Record record, ProfileSnapsho .withToken(params.getToken()); } - public Future sendDiErrorEvent(Throwable throwable, - OkapiConnectionParams okapiParams, - String jobExecutionId, - String tenantId, - Record currentRecord) { + public Future sendDiErrorEvent(Throwable throwable, OkapiConnectionParams okapiParams, String jobExecutionId, + String tenantId, Record currentRecord) { okapiParams.getHeaders().set(RECORD_ID_HEADER, currentRecord.getId()); for (DiErrorPayloadBuilder payloadBuilder: errorPayloadBuilders) { if (payloadBuilder.isEligible(currentRecord.getRecordType())) { @@ -166,6 +166,25 @@ public Future sendDiErrorEvent(Throwable throwable, } } LOGGER.warn("sendDiErrorEvent:: Appropriate DI_ERROR payload builder not found, DI_ERROR without records info will be send"); - return Future.failedFuture(throwable); + sendDiError(throwable, jobExecutionId, okapiParams, currentRecord); + return Future.succeededFuture(true); + } + + private void sendDiError(Throwable throwable, String jobExecutionId, OkapiConnectionParams okapiParams, Record record) { + HashMap context = new HashMap<>(); + context.put(ERROR_KEY, throwable.getMessage()); + if (record != null && record.getRecordType() != null) { + context.put(RecordConversionUtil.getEntityType(record).value(), Json.encode(record)); + } + + DataImportEventPayload payload = new DataImportEventPayload() + .withEventType(DI_ERROR.value()) + .withJobExecutionId(jobExecutionId) + .withOkapiUrl(okapiParams.getOkapiUrl()) + .withTenant(okapiParams.getTenantId()) + .withToken(okapiParams.getToken()) + .withContext(context); + EventHandlingUtil.sendEventToKafka(okapiParams.getTenantId(), Json.encode(payload), DI_ERROR.value(), + KafkaHeaderUtils.kafkaHeadersFromMultiMap(okapiParams.getHeaders()), kafkaConfig, null); } } diff --git a/mod-source-record-manager-server/src/main/java/org/folio/services/journal/JournalUtil.java b/mod-source-record-manager-server/src/main/java/org/folio/services/journal/JournalUtil.java index cd1522ee7..d5aa1022d 100644 --- a/mod-source-record-manager-server/src/main/java/org/folio/services/journal/JournalUtil.java +++ b/mod-source-record-manager-server/src/main/java/org/folio/services/journal/JournalUtil.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.Logger; import org.folio.DataImportEventPayload; import org.folio.rest.jaxrs.model.DataImportEventTypes; +import org.folio.rest.jaxrs.model.IncomingRecord; import org.folio.rest.jaxrs.model.JournalRecord; import org.folio.rest.jaxrs.model.Record; @@ -24,6 +25,7 @@ import static org.apache.commons.lang3.StringUtils.EMPTY; import static org.apache.commons.lang3.StringUtils.isEmpty; import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_ERROR; +import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_INVENTORY_INSTANCE_CREATED; import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_LOG_SRS_MARC_BIB_RECORD_UPDATED; import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_SRS_MARC_BIB_RECORD_UPDATED; import static org.folio.rest.jaxrs.model.JournalRecord.EntityType.AUTHORITY; @@ -52,6 +54,7 @@ public class JournalUtil { private static final String NOT_MATCHED_NUMBER = 
"NOT_MATCHED_NUMBER"; public static final String PERMANENT_LOCATION_ID_KEY = "permanentLocationId"; private static final String CENTRAL_TENANT_ID_KEY = "CENTRAL_TENANT_ID"; + private static final String CURRENT_EVENT_TYPE = "CURRENT_EVENT_TYPE"; private JournalUtil() { @@ -64,6 +67,40 @@ private static String extractRecord(HashMap context) { .orElse(EMPTY); } + public static List buildJournalRecordsByRecords(List records) { + return records.stream().map(record -> { + JournalRecord journalRecord = new JournalRecord() + .withId(UUID.randomUUID().toString()) + .withJobExecutionId(record.getSnapshotId()) + .withSourceId(record.getId()) + .withSourceRecordOrder(record.getOrder()) + .withActionType(JournalRecord.ActionType.PARSE) + .withActionDate(new Date()) + .withActionStatus(record.getErrorRecord() == null ? JournalRecord.ActionStatus.COMPLETED : JournalRecord.ActionStatus.ERROR); + if (record.getErrorRecord() != null) { + journalRecord.setError(record.getErrorRecord().getDescription()); + } + return journalRecord; + }).toList(); + } + + public static List buildIncomingRecordsByRecords(List records) { + return records.stream().map(record -> { + IncomingRecord incomingRecord = new IncomingRecord() + .withId(record.getId()) + .withJobExecutionId(record.getSnapshotId()) + .withOrder(record.getOrder()) + .withRawRecordContent(record.getRawRecord().getContent()); + if (record.getRecordType() != null) { + incomingRecord.setRecordType(IncomingRecord.RecordType.fromValue(record.getRecordType().value())); + } + if (record.getParsedRecord() != null) { + incomingRecord.setParsedRecordContent(String.valueOf(record.getParsedRecord().getContent())); + } + return incomingRecord; + }).toList(); + } + public static List buildJournalRecordsByEvent(DataImportEventPayload eventPayload, JournalRecord.ActionType actionType, JournalRecord.EntityType entityType, JournalRecord.ActionStatus actionStatus) throws JournalRecordMapperException { try { @@ -82,17 +119,8 @@ record = new ObjectMapper().readValue(recordAsString, Record.class); } String entityAsString = eventPayloadContext.get(entityType.value()); - JournalRecord journalRecord = new JournalRecord() - .withJobExecutionId(record.getSnapshotId()) - .withSourceId(record.getId()) - .withSourceRecordOrder(record.getOrder()) - .withEntityType(entityType) - .withActionType(actionType) - .withActionDate(new Date()) - .withActionStatus(actionStatus) - // tenantId field is filled in only for the case when record/entity has been changed on central tenant - // by data import initiated on a member tenant - .withTenantId(eventPayload.getContext().get(CENTRAL_TENANT_ID_KEY)); + JournalRecord journalRecord = buildCommonJournalRecord(actionStatus, actionType, record, eventPayload) + .withEntityType(entityType); if (DI_ERROR == DataImportEventTypes.fromValue(eventPayload.getEventType())) { journalRecord.setError(eventPayloadContext.get(ERROR_KEY)); @@ -126,6 +154,13 @@ record = new ObjectMapper().readValue(recordAsString, Record.class); if (entityType == PO_LINE) { journalRecord.setOrderId(entityJson.getString("purchaseOrderId")); } + if (eventPayload.getEventType().equals(DI_INVENTORY_INSTANCE_CREATED.value()) || + (eventPayloadContext.containsKey(CURRENT_EVENT_TYPE) + && DataImportEventTypes.fromValue(eventPayloadContext.get(CURRENT_EVENT_TYPE)) == DI_INVENTORY_INSTANCE_CREATED + && entityType.equals(INSTANCE))) { + var journalRecordWithMarcBib = buildJournalRecordWithMarcBibType(actionStatus, actionType, record, eventPayload, eventPayloadContext); + return 
Lists.newArrayList(journalRecord, journalRecordWithMarcBib); + } return Lists.newArrayList(journalRecord); } if ((entityType == HOLDINGS || entityType == ITEM || eventPayloadContext.get(MULTIPLE_ERRORS_KEY) != null) @@ -162,6 +197,32 @@ private static boolean isMarcBibUpdateEventReceived(DataImportEventPayload event .map(mp -> DI_SRS_MARC_BIB_RECORD_UPDATED == DataImportEventTypes.fromValue(mp)).orElse(false); } + private static JournalRecord buildJournalRecordWithMarcBibType(JournalRecord.ActionStatus actionStatus, JournalRecord.ActionType actionType, Record currentRecord, + DataImportEventPayload eventPayload, HashMap eventPayloadContext) { + String marcBibEntityAsString = eventPayloadContext.get(MARC_BIBLIOGRAPHIC.value()); + String marcBibEntityId = new JsonObject(marcBibEntityAsString).getString(ID_KEY); + + return buildCommonJournalRecord(actionStatus, actionType, currentRecord, eventPayload) + .withEntityId(marcBibEntityId) + .withEntityType(MARC_BIBLIOGRAPHIC); + } + + private static JournalRecord buildCommonJournalRecord(JournalRecord.ActionStatus actionStatus, JournalRecord.ActionType actionType, Record currentRecord, + DataImportEventPayload eventPayload){ + String tenantId = eventPayload.getContext().get(CENTRAL_TENANT_ID_KEY); + + return new JournalRecord() + .withJobExecutionId(currentRecord.getSnapshotId()) + .withSourceId(currentRecord.getId()) + .withSourceRecordOrder(currentRecord.getOrder()) + .withActionType(actionType) + .withActionDate(new Date()) + .withActionStatus(actionStatus) + // tenantId field is filled in only for the case when record/entity has been changed on central tenant + // by data import initiated on a member tenant + .withTenantId(tenantId); + } + private static List processHoldings(JournalRecord.ActionType actionType, JournalRecord.EntityType entityType, JournalRecord.ActionStatus actionStatus, HashMap eventPayloadContext, Record record) { JsonArray multipleHoldings = getJsonArrayOfHoldings(eventPayloadContext.get(entityType.value())); diff --git a/mod-source-record-manager-server/src/main/java/org/folio/verticle/consumers/StoredRecordChunksKafkaHandler.java b/mod-source-record-manager-server/src/main/java/org/folio/verticle/consumers/StoredRecordChunksKafkaHandler.java index 2a9e815fb..3a8a0ab70 100644 --- a/mod-source-record-manager-server/src/main/java/org/folio/verticle/consumers/StoredRecordChunksKafkaHandler.java +++ b/mod-source-record-manager-server/src/main/java/org/folio/verticle/consumers/StoredRecordChunksKafkaHandler.java @@ -35,7 +35,6 @@ import org.springframework.stereotype.Component; import javax.ws.rs.NotFoundException; -import java.util.Arrays; import java.util.Date; import java.util.LinkedHashMap; import java.util.List; @@ -45,10 +44,10 @@ import static java.lang.String.format; import static org.apache.commons.lang3.ObjectUtils.allNotNull; -import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_EDIFACT_RECORD_CREATED; +import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_INCOMING_EDIFACT_RECORD_PARSED; import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_MARC_BIB_FOR_ORDER_CREATED; import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_SRS_MARC_AUTHORITY_RECORD_CREATED; -import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_SRS_MARC_BIB_RECORD_CREATED; +import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_INCOMING_MARC_BIB_RECORD_PARSED; import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_SRS_MARC_HOLDING_RECORD_CREATED; import static 
org.folio.rest.jaxrs.model.JournalRecord.ActionStatus.COMPLETED; import static org.folio.rest.jaxrs.model.JournalRecord.ActionType.CREATE; @@ -70,10 +69,10 @@ public class StoredRecordChunksKafkaHandler implements AsyncRecordHandler RECORD_TYPE_TO_EVENT_TYPE = Map.of( - MARC_BIB, DI_SRS_MARC_BIB_RECORD_CREATED, + MARC_BIB, DI_INCOMING_MARC_BIB_RECORD_PARSED, MARC_AUTHORITY, DI_SRS_MARC_AUTHORITY_RECORD_CREATED, MARC_HOLDING, DI_SRS_MARC_HOLDING_RECORD_CREATED, - EDIFACT, DI_EDIFACT_RECORD_CREATED + EDIFACT, DI_INCOMING_EDIFACT_RECORD_PARSED ); private RecordsPublishingService recordsPublishingService; @@ -124,7 +123,7 @@ public Future handle(KafkaConsumerRecord record) { // we only know record type by inspecting the records, assuming records are homogeneous type and defaulting to previous static value DataImportEventTypes eventType = !storedRecords.isEmpty() && RECORD_TYPE_TO_EVENT_TYPE.containsKey(storedRecords.get(0).getRecordType()) ? RECORD_TYPE_TO_EVENT_TYPE.get(storedRecords.get(0).getRecordType()) - : DI_SRS_MARC_BIB_RECORD_CREATED; + : DI_INCOMING_MARC_BIB_RECORD_PARSED; LOGGER.debug("handle:: RecordsBatchResponse has been received, starting processing chunkId: {} chunkNumber: {} jobExecutionId: {}", chunkId, chunkNumber, jobExecutionId); saveCreatedRecordsInfoToDataImportLog(storedRecords, okapiConnectionParams.getTenantId()); diff --git a/mod-source-record-manager-server/src/main/resources/templates/db_scripts/create_get_job_log_entries_function.sql b/mod-source-record-manager-server/src/main/resources/templates/db_scripts/create_get_job_log_entries_function.sql index 358fd4508..70fdd0a7d 100644 --- a/mod-source-record-manager-server/src/main/resources/templates/db_scripts/create_get_job_log_entries_function.sql +++ b/mod-source-record-manager-server/src/main/resources/templates/db_scripts/create_get_job_log_entries_function.sql @@ -25,120 +25,383 @@ DROP FUNCTION IF EXISTS get_job_log_entries(uuid,text,text,bigint,bigint,boolean -- Script to create function to get data import job log entries (jobLogEntry). 
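-- The rewritten body below works in two stages: a temp_result CTE first collapses the journal
-- to one row per entity, picking the surviving action by CREATE/UPDATE/MODIFY/NON_MATCH priority,
-- and the outer query then joins per-entity slices (instances, holdings, items, authorities,
-- po_lines, invoice lines) back onto the MARC-level aggregation by source_id.
-- Status mapping applied in the CTE (a summary of the CASE expression below, not new logic):
--   error present or NON_MATCH -> DISCARDED; CREATE -> CREATED; UPDATE or MODIFY -> UPDATED.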
CREATE OR REPLACE FUNCTION get_job_log_entries(jobExecutionId uuid, sortingField text, sortingDir text, limitVal bigint, offsetVal bigint, errorsOnly boolean, entityType text) - RETURNS TABLE(job_execution_id uuid, source_id uuid, source_record_order integer, invoiceline_number text, title text, - source_record_action_status text, instance_action_status text, holdings_action_status text, item_action_status text, - authority_action_status text, po_line_action_status text, invoice_action_status text, error text, total_count bigint, - invoice_line_journal_record_id uuid, source_record_entity_type text, holdings_entity_hrid text[], source_record_order_array integer[]) + RETURNS TABLE(job_execution_id uuid, incoming_record_id uuid, source_id uuid, source_record_order integer, invoiceline_number text, title text, + source_record_action_status text, source_entity_error text, source_record_tenant_id text,instance_action_status text, instance_entity_id text, instance_entity_hrid text, instance_entity_error text, + instance_entity_tenant_id text, holdings_action_status text, holdings_entity_id text, holdings_entity_hrid text, holdings_permanent_location_id text, + holdings_entity_error text, item_action_status text, item_entity_id text, item_entity_hrid text, item_entity_error text, item_holdings_id text, authority_action_status text, + authority_entity_id text, authority_entity_error text, po_line_action_status text, po_line_entity_id text, po_line_entity_hrid text, po_line_entity_error text, + order_entity_id text, invoice_action_status text, invoice_entity_id text[], invoice_entity_hrid text[], invoice_entity_error text, invoice_line_action_status text, + invoice_line_entity_id text, invoice_line_entity_hrid text, invoice_line_entity_error text, total_count bigint, + invoice_line_journal_record_id uuid, source_record_entity_type text, source_record_order_array integer[]) AS $$ DECLARE - v_sortingField text DEFAULT sortingfield; - v_entityAttribute text[] DEFAULT ARRAY[upper(entityType)]; + v_sortingField text DEFAULT sortingfield; + v_entityAttribute text[] DEFAULT ARRAY[upper(entityType)]; BEGIN --- Using the source_record_order column in the array type provides support for sorting invoices and marc records. - IF sortingField = 'source_record_order' THEN - v_sortingField := 'source_record_order_array'; - END IF; - - IF entityType = 'MARC' THEN - v_entityAttribute := ARRAY['MARC_BIBLIOGRAPHIC', 'MARC_HOLDINGS', 'MARC_AUTHORITY']; - END IF; - - RETURN QUERY EXECUTE format(' -SELECT records_actions.job_execution_id, records_actions.source_id, records_actions.source_record_order, '''' as invoiceline_number, - rec_titles.title, + -- Using the source_record_order column in the array type provides support for sorting invoices and marc records. 
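+  -- Invoice lines carry their order as 'n-m' inside entity_hrid, which the invoice branch
+  -- below converts to an int[] via string_to_array; other records fall back to the plain
+  -- source_record_order wrapped in a one-element array.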
+ IF sortingField = 'source_record_order' THEN + v_sortingField := 'source_record_order_array'; + END IF; + + IF entityType = 'MARC' THEN + v_entityAttribute := ARRAY['MARC_BIBLIOGRAPHIC', 'MARC_HOLDINGS', 'MARC_AUTHORITY']; + END IF; + + RETURN QUERY EXECUTE format(' +WITH + temp_result AS ( + SELECT id, job_execution_id, source_id, entity_type, entity_id, entity_hrid, + CASE + WHEN error_max != '''' OR action_type = ''NON_MATCH'' THEN ''DISCARDED'' + WHEN action_type = ''CREATE'' THEN ''CREATED'' + WHEN action_type IN (''UPDATE'', ''MODIFY'') THEN ''UPDATED'' + END AS action_type, + action_status, action_date, source_record_order, error, title, tenant_id, instance_id, holdings_id, order_id, permanent_location_id + FROM journal_records + INNER JOIN ( + SELECT entity_type as entity_type_max, entity_id as entity_id_max, action_status as action_status_max, max(error) AS error_max, + (array_agg(id ORDER BY array_position(array[''CREATE'', ''UPDATE'', ''MODIFY'', ''NON_MATCH''], action_type)))[1] AS id_max + FROM journal_records + WHERE job_execution_id = ''%1$s'' AND entity_type NOT IN (''EDIFACT'', ''INVOICE'') + GROUP BY entity_type, entity_id, action_status, source_id, source_record_order + ) AS action_type_by_source ON journal_records.id = action_type_by_source.id_max + ), + instances AS ( + SELECT action_type, entity_id, source_id, entity_hrid, error, job_execution_id, title, source_record_order, tenant_id + FROM temp_result WHERE entity_type = ''INSTANCE'' + ), + holdings AS ( + SELECT tmp.action_type, tmp.entity_type, tmp.entity_id, tmp.entity_hrid, tmp.error, tmp.instance_id, + tmp.permanent_location_id, tmp.job_execution_id, tmp.source_id, tmp.title, tmp.source_record_order + FROM temp_result tmp + INNER JOIN + (SELECT + CASE + WHEN EXISTS (SELECT condition_result.entity_id FROM temp_result condition_result + WHERE (condition_result.action_type=''CREATED'' AND condition_result.entity_type=''HOLDINGS'') + OR + (condition_result.action_type=''DISCARDED'' AND condition_result.error != '''' AND condition_result.entity_type=''HOLDINGS'')) + THEN + (SELECT deep_nested.id + FROM temp_result deep_nested + WHERE + (deep_nested.action_type=''CREATED'' AND deep_nested.id = nested_result.id) + OR + (deep_nested.action_type=''DISCARDED'' AND deep_nested.error != '''' AND deep_nested.id = nested_result.id)) + ELSE + nested_result.id + END + FROM temp_result nested_result) AS joining_table + ON tmp.id = joining_table.id + WHERE tmp.entity_type=''HOLDINGS'' + ), + items AS ( + SELECT tmp.action_type, tmp.entity_id, tmp.holdings_id, tmp.entity_hrid, tmp.error, tmp.instance_id, + tmp.job_execution_id, tmp.source_id, tmp.title, tmp.source_record_order + FROM temp_result tmp + INNER JOIN + (SELECT + CASE + WHEN EXISTS (SELECT condition_result.entity_id FROM temp_result condition_result + WHERE (condition_result.action_type IN (''CREATED'',''UPDATED'') AND condition_result.entity_type=''ITEM'') + OR + (condition_result.action_type=''DISCARDED'' AND condition_result.error != '''' AND condition_result.entity_type=''ITEM'')) + THEN + (SELECT deep_nested.id + FROM temp_result deep_nested + WHERE + (deep_nested.action_type IN (''CREATED'',''UPDATED'') AND deep_nested.id = nested_result.id) + OR + (deep_nested.action_type=''DISCARDED'' AND deep_nested.error != '''' AND deep_nested.id = nested_result.id)) + ELSE + nested_result.id + END + FROM temp_result nested_result) AS joining_table + ON tmp.id = joining_table.id + WHERE tmp.entity_type=''ITEM'' + ), + po_lines AS ( + SELECT 
action_type,entity_id,entity_hrid,temp_result.source_id,error,order_id,temp_result.job_execution_id,temp_result.title,temp_result.source_record_order + FROM temp_result WHERE entity_type = ''PO_LINE'' + ), + authorities AS ( + SELECT action_type, entity_id, temp_result.source_id, error, temp_result.job_execution_id, temp_result.title, temp_result.source_record_order + FROM temp_result WHERE entity_type = ''AUTHORITY'' + ), + marc_authority AS ( + SELECT temp_result.job_execution_id, entity_id, title, source_record_order, action_type, error, source_id, tenant_id + FROM temp_result WHERE entity_type = ''MARC_AUTHORITY'' + ), + marc_holdings AS ( + SELECT temp_result.job_execution_id, entity_id, title, source_record_order, action_type, error, source_id, tenant_id + FROM temp_result WHERE entity_type = ''MARC_HOLDINGS'' + ) + +SELECT records_actions.job_execution_id AS job_execution_id, + records_actions.source_id AS source_id, + records_actions.source_id AS incoming_record_id, + records_actions.source_record_order AS source_record_order, + '''' as invoiceline_number, + coalesce(rec_titles.title, marc_holdings_info.title) AS title, CASE - WHEN marc_errors_number != 0 OR marc_actions[array_length(marc_actions, 1)] = ''NON_MATCH'' THEN ''DISCARDED'' - WHEN marc_actions[array_length(marc_actions, 1)] = ''CREATE'' THEN ''CREATED'' - WHEN marc_actions[array_length(marc_actions, 1)] = ''UPDATE'' THEN ''UPDATED'' - END AS source_record_action_status, - get_entity_status(instance_actions, instance_errors_number) AS instance_action_status, - get_entity_status(holdings_actions, holdings_errors_number) AS holdings_action_status, - get_entity_status(item_actions, item_errors_number) AS item_action_status, - get_entity_status(authority_actions, authority_errors_number) AS authority_action_status, - get_entity_status(po_line_actions, po_line_errors_number) AS po_line_action_status, - null AS invoice_action_status, rec_errors.error, records_actions.total_count, - null AS invoiceLineJournalRecordId, + WHEN marc_errors_number != 0 OR marc_actions[array_length(marc_actions, 1)] = ''NON_MATCH'' THEN ''DISCARDED'' + WHEN marc_actions[array_length(marc_actions, 1)] = ''CREATE'' THEN ''CREATED'' + WHEN marc_actions[array_length(marc_actions, 1)] IN (''UPDATE'', ''MODIFY'') THEN ''UPDATED'' + END AS source_record_action_status, + records_actions.source_entity_error AS source_entity_error, + records_actions.source_record_tenant_id AS source_record_tenant_id, + instance_info.action_type AS instance_action_status, + coalesce(instance_info.instance_entity_id, holdings_info.instance_id, items_info.instance_id) AS instance_entity_id, + instance_info.instance_entity_hrid AS instance_entity_hrid, + instance_info.instance_entity_error AS instance_entity_error, + instance_info.instance_entity_tenant_id AS instance_entity_tenant_id, + holdings_info.action_type AS holdings_action_status, + coalesce(holdings_info.holdings_entity_id, items_info.item_holdings_id) AS holdings_entity_id, + holdings_info.holdings_entity_hrid AS holdings_entity_hrid, + holdings_info.holdings_permanent_location_id AS holdings_permanent_location_id, + holdings_info.holdings_entity_error AS holdings_entity_error, + items_info.action_type AS item_action_status, + items_info.items_entity_id AS item_entity_id, + items_info.items_entity_hrid AS item_entity_hrid, + items_info.items_entity_error AS item_entity_error, + items_info.item_holdings_id AS item_holdings_id, + authority_info.action_type AS authority_action_status, + 
coalesce(authority_info.authority_entity_id, marc_authority_info.marc_authority_entity_id) AS authority_entity_id, + coalesce(authority_info.authority_entity_error, marc_authority_info.marc_authority_entity_error) AS authority_entity_error, + po_lines_info.action_type AS po_line_action_status, + po_lines_info.po_lines_entity_id AS po_lines_entity_id, + po_lines_info.po_lines_entity_hrid AS po_lines_entity_hrid, + po_lines_info.po_lines_entity_error AS po_lines_entity_error, + po_lines_info.po_lines_order_id AS order_entity_id, + null AS invoice_action_status, + null::text[] AS invoice_entity_id, + null::text[] AS invoice_entity_hrid, + null AS invoice_entity_error, + null AS invoice_line_action_status, + null AS invoice_line_entity_id, + null AS invoice_line_entity_hrid, + null AS invoice_line_entity_error, + records_actions.total_count, + null::UUID AS invoice_line_journal_record_id, records_actions.source_record_entity_type, - records_actions.holdings_entity_hrid, ARRAY[records_actions.source_record_order] AS source_record_order_array + FROM ( - SELECT journal_records.source_id, journal_records.source_record_order, journal_records.job_execution_id, - array_agg(action_type ORDER BY array_position(array[''MATCH'', ''NON_MATCH'', ''MODIFY'', ''UPDATE'', ''CREATE''], action_type)) FILTER (WHERE entity_type IN (''MARC_BIBLIOGRAPHIC'', ''MARC_HOLDINGS'', ''MARC_AUTHORITY'')) AS marc_actions, - count(journal_records.source_id) FILTER (WHERE (entity_type = ''MARC_BIBLIOGRAPHIC'' OR entity_type = ''MARC_HOLDINGS'' OR entity_type = ''MARC_AUTHORITY'') AND journal_records.error != '''') AS marc_errors_number, - array_agg(action_type ORDER BY array_position(array[''CREATE'', ''MODIFY'', ''UPDATE'', ''NON_MATCH'', ''MATCH''], action_type)) FILTER (WHERE entity_type = ''INSTANCE'' AND (entity_id IS NOT NULL OR action_type = ''NON_MATCH'')) AS instance_actions, - count(journal_records.source_id) FILTER (WHERE entity_type = ''INSTANCE'' AND journal_records.error != '''') AS instance_errors_number, - array_agg(action_type ORDER BY array_position(array[''CREATE'', ''MODIFY'', ''UPDATE'', ''NON_MATCH''], action_type)) FILTER (WHERE entity_type = ''HOLDINGS'') AS holdings_actions, - count(journal_records.source_id) FILTER (WHERE entity_type = ''HOLDINGS'' AND journal_records.error != '''') AS holdings_errors_number, - array_agg(action_type ORDER BY array_position(array[''CREATE'', ''MODIFY'', ''UPDATE'', ''NON_MATCH''], action_type)) FILTER (WHERE entity_type = ''ITEM'') AS item_actions, - count(journal_records.source_id) FILTER (WHERE entity_type = ''ITEM'' AND journal_records.error != '''') AS item_errors_number, - array_agg(action_type ORDER BY array_position(array[''CREATE'', ''MODIFY'', ''UPDATE'', ''NON_MATCH''], action_type)) FILTER (WHERE entity_type = ''AUTHORITY'') AS authority_actions, - count(journal_records.source_id) FILTER (WHERE entity_type = ''AUTHORITY'' AND journal_records.error != '''') AS authority_errors_number, - array_agg(action_type ORDER BY array_position(array[''CREATE'', ''MODIFY'', ''UPDATE'', ''NON_MATCH''], action_type)) FILTER (WHERE entity_type = ''PO_LINE'') AS po_line_actions, - count(journal_records.source_id) FILTER (WHERE entity_type = ''PO_LINE'' AND journal_records.error != '''') AS po_line_errors_number, - count(journal_records.source_id) OVER () AS total_count, - (array_agg(journal_records.entity_type) FILTER (WHERE entity_type IN (''MARC_BIBLIOGRAPHIC'', ''MARC_HOLDINGS'', ''MARC_AUTHORITY'')))[1] AS source_record_entity_type, - 
array_agg(journal_records.entity_hrid) FILTER (WHERE entity_hrid !='''' and entity_type = ''HOLDINGS'') as holdings_entity_hrid - FROM journal_records - WHERE journal_records.job_execution_id = ''%1$s'' and - entity_type in (''MARC_BIBLIOGRAPHIC'', ''MARC_HOLDINGS'', ''MARC_AUTHORITY'', ''INSTANCE'', ''HOLDINGS'', ''ITEM'', ''AUTHORITY'', ''PO_LINE'') - GROUP BY journal_records.source_id, journal_records.source_record_order, journal_records.job_execution_id - HAVING count(journal_records.source_id) FILTER (WHERE (%3$L = ''ALL'' or entity_type = ANY(%4$L)) AND (NOT %2$L or journal_records.error <> '''')) > 0 + SELECT journal_records.source_id, journal_records.source_record_order, journal_records.job_execution_id, + array_agg(action_type ORDER BY array_position(array[''MATCH'', ''NON_MATCH'', ''MODIFY'', ''UPDATE'', ''CREATE''], action_type)) FILTER (WHERE entity_type IN (''MARC_BIBLIOGRAPHIC'', ''MARC_HOLDINGS'', ''MARC_AUTHORITY'')) AS marc_actions, + count(journal_records.source_id) FILTER (WHERE (entity_type = ''MARC_BIBLIOGRAPHIC'' OR entity_type = ''MARC_HOLDINGS'' OR entity_type = ''MARC_AUTHORITY'') AND journal_records.error != '''') AS marc_errors_number, + array_agg(action_type ORDER BY array_position(array[''CREATE'', ''MODIFY'', ''UPDATE'', ''NON_MATCH'', ''MATCH''], action_type)) FILTER (WHERE entity_type = ''INSTANCE'' AND (entity_id IS NOT NULL OR action_type = ''NON_MATCH'')) AS instance_actions, + count(journal_records.source_id) FILTER (WHERE entity_type = ''INSTANCE'' AND journal_records.error != '''') AS instance_errors_number, + array_agg(action_type ORDER BY array_position(array[''CREATE'', ''MODIFY'', ''UPDATE'', ''NON_MATCH''], action_type)) FILTER (WHERE entity_type = ''HOLDINGS'') AS holdings_actions, + count(journal_records.source_id) FILTER (WHERE entity_type = ''HOLDINGS'' AND journal_records.error != '''') AS holdings_errors_number, + array_agg(action_type ORDER BY array_position(array[''CREATE'', ''MODIFY'', ''UPDATE'', ''NON_MATCH''], action_type)) FILTER (WHERE entity_type = ''ITEM'') AS item_actions, + count(journal_records.source_id) FILTER (WHERE entity_type = ''ITEM'' AND journal_records.error != '''') AS item_errors_number, + array_agg(action_type ORDER BY array_position(array[''CREATE'', ''MODIFY'', ''UPDATE'', ''NON_MATCH''], action_type)) FILTER (WHERE entity_type = ''AUTHORITY'') AS authority_actions, + count(journal_records.source_id) FILTER (WHERE entity_type = ''AUTHORITY'' AND journal_records.error != '''') AS authority_errors_number, + array_agg(action_type ORDER BY array_position(array[''CREATE'', ''MODIFY'', ''UPDATE'', ''NON_MATCH''], action_type)) FILTER (WHERE entity_type = ''PO_LINE'') AS po_line_actions, + count(journal_records.source_id) FILTER (WHERE entity_type = ''PO_LINE'' AND journal_records.error != '''') AS po_line_errors_number, + count(journal_records.source_id) OVER () AS total_count, + (array_agg(journal_records.entity_type) FILTER (WHERE entity_type IN (''MARC_BIBLIOGRAPHIC'', ''MARC_HOLDINGS'', ''MARC_AUTHORITY'')))[1] AS source_record_entity_type, + (array_agg(journal_records.tenant_id) FILTER (WHERE entity_type IN (''MARC_BIBLIOGRAPHIC'', ''MARC_HOLDINGS'', ''MARC_AUTHORITY'')))[1] AS source_record_tenant_id, + (array_agg(journal_records.error) FILTER (WHERE entity_type IN (''MARC_BIBLIOGRAPHIC'', ''MARC_HOLDINGS'', ''MARC_AUTHORITY'')))[1] AS source_entity_error + FROM journal_records + WHERE journal_records.job_execution_id = ''%1$s'' and + entity_type in (''MARC_BIBLIOGRAPHIC'', ''MARC_HOLDINGS'', ''MARC_AUTHORITY'', 
''INSTANCE'', ''HOLDINGS'', ''ITEM'', ''AUTHORITY'', ''PO_LINE'') + GROUP BY journal_records.source_id, journal_records.source_record_order, journal_records.job_execution_id + HAVING count(journal_records.source_id) FILTER (WHERE (%3$L = ''ALL'' or entity_type = ANY(%4$L)) AND (NOT %2$L or journal_records.error <> '''')) > 0 ) AS records_actions - LEFT JOIN (SELECT journal_records.source_id, - CASE - WHEN COUNT(*) = 1 THEN array_to_string(array_agg(journal_records.error), '', '') - ELSE ''['' || array_to_string(array_agg(journal_records.error), '', '') || '']'' - END AS error - FROM journal_records - WHERE journal_records.job_execution_id = ''%1$s'' AND journal_records.error != '''' GROUP BY journal_records.source_id) AS rec_errors - ON rec_errors.source_id = records_actions.source_id - LEFT JOIN (SELECT journal_records.source_id, journal_records.title - FROM journal_records - WHERE journal_records.job_execution_id = ''%1$s'') AS rec_titles - ON rec_titles.source_id = records_actions.source_id AND rec_titles.title IS NOT NULL + LEFT JOIN (SELECT journal_records.source_id, journal_records.title + FROM journal_records + WHERE journal_records.job_execution_id = ''%1$s'') AS rec_titles + ON rec_titles.source_id = records_actions.source_id AND rec_titles.title IS NOT NULL + LEFT JOIN ( + SELECT instances.action_type AS action_type, + instances.job_execution_id AS job_execution_id, + instances.title AS title, + instances.source_id AS source_id, + instances.entity_id AS instance_entity_id, + instances.entity_hrid AS instance_entity_hrid, + instances.error AS instance_entity_error, + instances.tenant_id AS instance_entity_tenant_id + FROM instances +) AS instance_info ON instance_info.source_id = records_actions.source_id + + + LEFT JOIN ( + SELECT + holdings.action_type AS action_type, + holdings.source_id AS source_id, + holdings.title AS title, + holdings.entity_id AS holdings_entity_id, + holdings.entity_hrid AS holdings_entity_hrid, + holdings.permanent_location_id AS holdings_permanent_location_id, + holdings.error AS holdings_entity_error, + holdings.instance_id AS instance_id + FROM holdings +) AS holdings_info ON holdings_info.source_id = records_actions.source_id + + LEFT JOIN ( + SELECT items.action_type AS action_type, + items.source_id AS source_id, + items.title AS title, + items.entity_id AS items_entity_id, + items.entity_hrid AS items_entity_hrid, + items.error AS items_entity_error, + items.holdings_id AS item_holdings_id, + items.instance_id AS instance_id + FROM items +) AS items_info ON items_info.source_id = records_actions.source_id + + LEFT JOIN ( + SELECT po_lines.action_type AS action_type, + po_lines.source_id AS source_id, + po_lines.title AS title, + po_lines.entity_id AS po_lines_entity_id, + po_lines.entity_hrid AS po_lines_entity_hrid, + po_lines.error AS po_lines_entity_error, + po_lines.order_id AS po_lines_order_id + FROM po_lines +) AS po_lines_info ON po_lines_info.source_id = records_actions.source_id + + + LEFT JOIN ( + SELECT authorities.action_type AS action_type, + authorities.source_id AS source_id, + authorities.title AS title, + authorities.entity_id AS authority_entity_id, + authorities.error AS authority_entity_error + FROM authorities +) AS authority_info ON authority_info.source_id = records_actions.source_id + + LEFT JOIN ( + SELECT marc_authority.action_type AS action_type, + marc_authority.source_id AS source_id, + marc_authority.title AS title, + marc_authority.entity_id AS marc_authority_entity_id, + marc_authority.error AS 
marc_authority_entity_error + FROM marc_authority +) AS marc_authority_info ON marc_authority_info.source_id = records_actions.source_id + + LEFT JOIN ( + SELECT marc_holdings.action_type AS action_type, + marc_holdings.source_id AS source_id, + marc_holdings.title AS title, + marc_holdings.entity_id AS marc_authority_entity_id, + marc_holdings.error AS marc_authority_entity_error + FROM marc_holdings +) AS marc_holdings_info ON marc_holdings_info.source_id = records_actions.source_id + + LEFT JOIN (SELECT journal_records.source_id, + CASE + WHEN COUNT(*) = 1 THEN array_to_string(array_agg(journal_records.error), '', '') + ELSE ''['' || array_to_string(array_agg(journal_records.error), '', '') || '']'' + END AS error + FROM journal_records + WHERE journal_records.job_execution_id = ''%1$s'' AND journal_records.error != '''' GROUP BY journal_records.source_id) AS rec_errors + ON rec_errors.source_id = records_actions.source_id + + UNION -SELECT records_actions.job_execution_id, records_actions.source_id, source_record_order, entity_hrid as invoiceline_number, title, +SELECT records_actions.job_execution_id AS job_execution_id, + records_actions.source_id AS source_id, + records_actions.source_id AS incoming_record_id, + source_record_order AS source_record_order, + entity_hrid as invoiceline_number, + invoice_line_info.title AS title, CASE - WHEN marc_errors_number != 0 OR marc_actions[array_length(marc_actions, 1)] = ''NON_MATCH'' THEN ''DISCARDED'' - WHEN marc_actions[array_length(marc_actions, 1)] = ''CREATE'' THEN ''CREATED'' - WHEN marc_actions[array_length(marc_actions, 1)] = ''UPDATE'' THEN ''UPDATED'' - END AS source_record_action_status, + WHEN marc_errors_number != 0 OR marc_actions[array_length(marc_actions, 1)] = ''NON_MATCH'' THEN ''DISCARDED'' + WHEN marc_actions[array_length(marc_actions, 1)] = ''CREATE'' THEN ''CREATED'' + WHEN marc_actions[array_length(marc_actions, 1)] IN (''UPDATE'', ''MODIFY'') THEN ''UPDATED'' + END AS source_record_action_status, + records_actions.source_record_error[1] as source_entity_error, + records_actions.source_record_tenant_id AS source_record_tenant_id, null AS instance_action_status, + null AS instance_entity_id, + null AS instance_entity_hrid, + null AS instance_entity_error, + null AS instance_entity_tenant_id, null AS holdings_action_status, + null AS holdings_entity_id, + null AS holdings_entity_hrid, + null AS holdings_permanent_location_id, + null AS holdings_entity_error, null AS item_action_status, + null AS item_entity_id, + null AS item_entity_hrid, + null AS item_entity_error, + null AS item_holdings_id, null AS authority_action_status, + null AS authority_entity_id, + null AS authority_entity_error, null AS po_line_action_status, + null AS po_line_entity_id, + null AS po_line_entity_hrid, + null AS po_line_entity_error, + null AS order_entity_id, get_entity_status(invoice_actions, invoice_errors_number) AS invoice_action_status, - error, + ARRAY[invoice_fields.invoice_entity_id::text] AS invoice_entity_id, + ARRAY[invoice_fields.invoice_entity_hrid::text] AS invoice_entity_hrid, + invoice_fields.invoice_entity_error AS invoice_entity_error, + invoice_line_info.invoice_line_action_status AS invoice_line_action_status, + invoice_line_info.invoice_line_entity_id AS invoice_line_entity_id, + invoice_line_info.invoice_line_entity_hrid AS invoice_line_entity_hrid, + invoice_line_info.invoice_line_entity_error AS invoice_line_entity_error, records_actions.total_count, - invoiceLineJournalRecordId, + invoiceLineJournalRecordId AS 
invoice_line_journal_record_id, records_actions.source_record_entity_type, - records_actions.holdings_entity_hrid, CASE - WHEN get_entity_status(invoice_actions, invoice_errors_number) IS NOT null THEN string_to_array(entity_hrid, ''-'')::int[] - ELSE ARRAY[source_record_order] - END AS source_record_order_array + WHEN get_entity_status(invoice_actions, invoice_errors_number) IS NOT null THEN string_to_array(entity_hrid, ''-'')::int[] + ELSE ARRAY[source_record_order] + END AS source_record_order_array FROM ( - SELECT journal_records.source_id, journal_records.job_execution_id, source_record_order, entity_hrid, title, error, - array[]::varchar[] AS marc_actions, - cast(0 as integer) AS marc_errors_number, - array_agg(action_type) FILTER (WHERE entity_type = ''INVOICE'') AS invoice_actions, - count(journal_records.source_id) FILTER (WHERE entity_type = ''INVOICE'' AND journal_records.error != '''') AS invoice_errors_number, - count(journal_records.source_id) OVER () AS total_count, - id AS invoiceLineJournalRecordId, - (array_agg(entity_type) FILTER (WHERE entity_type IN (''EDIFACT'')))[1] AS source_record_entity_type, - array[]::varchar[] as holdings_entity_hrid - FROM journal_records - WHERE journal_records.job_execution_id = ''%1$s'' and entity_type = ''INVOICE'' and title != ''INVOICE'' - GROUP BY journal_records.source_id, journal_records.source_record_order, journal_records.job_execution_id, - entity_hrid, title, error, id - HAVING count(journal_records.source_id) FILTER (WHERE (%3$L IN (''ALL'', ''INVOICE'')) AND (NOT %2$L or journal_records.error <> '''')) > 0 + SELECT journal_records.source_id, journal_records.job_execution_id, source_record_order, entity_hrid, title, error, + array[]::varchar[] AS marc_actions, + cast(0 as integer) AS marc_errors_number, + array_agg(action_type) FILTER (WHERE entity_type = ''INVOICE'') AS invoice_actions, + count(journal_records.source_id) FILTER (WHERE entity_type = ''INVOICE'' AND journal_records.error != '''') AS invoice_errors_number, + array_agg(error) FILTER (WHERE entity_type = ''EDIFACT'') AS source_record_error, + count(journal_records.source_id) OVER () AS total_count, + journal_records.tenant_id AS source_record_tenant_id, + id AS invoiceLineJournalRecordId, + (array_agg(entity_type) FILTER (WHERE entity_type IN (''EDIFACT'')))[1] AS source_record_entity_type + FROM journal_records + WHERE journal_records.job_execution_id = ''%1$s'' and entity_type = ''INVOICE'' and title != ''INVOICE'' + GROUP BY journal_records.source_id, journal_records.source_record_order, journal_records.job_execution_id, + entity_hrid, title, error, id + HAVING count(journal_records.source_id) FILTER (WHERE (%3$L IN (''ALL'', ''INVOICE'')) AND (NOT %2$L or journal_records.error <> '''')) > 0 ) AS records_actions + + LEFT JOIN LATERAL ( + SELECT journal_records.source_id, + max(entity_id) FILTER (WHERE entity_type = ''INVOICE'' AND title = ''INVOICE'') AS invoice_entity_id, + max(entity_hrid) FILTER (WHERE entity_type = ''INVOICE'' AND title = ''INVOICE'') AS invoice_entity_hrid, + max(error) FILTER (WHERE entity_type = ''INVOICE'' AND title = ''INVOICE'') AS invoice_entity_error + FROM journal_records + WHERE journal_records.job_execution_id = ''%1$s'' AND (entity_type = ''INVOICE'' OR title = ''INVOICE'') + GROUP BY journal_records.source_id + ) AS invoice_fields ON records_actions.source_id = invoice_fields.source_id + + + LEFT JOIN LATERAL ( + SELECT journal_records.source_id, + journal_records.job_execution_id, + journal_records.title, + CASE WHEN 
journal_records.action_status = ''ERROR'' THEN ''DISCARDED'' + WHEN journal_records.action_type = ''CREATE'' THEN ''CREATED'' + END AS invoice_line_action_status, + entity_hrid AS invoice_line_entity_hrid, + entity_id AS invoice_line_entity_id, + error AS invoice_line_entity_error + FROM journal_records + WHERE journal_records.job_execution_id = ''%1$s'' AND journal_records.entity_type = ''INVOICE'' AND journal_records.title != ''INVOICE'' + ) AS invoice_line_info ON records_actions.source_id = invoice_line_info.source_id AND records_actions.entity_hrid = invoice_line_info.invoice_line_entity_hrid + ORDER BY %5$I %6$s -LIMIT %7$s OFFSET %8$s;', +LIMIT %7$s OFFSET %8$s; +', jobExecutionId, errorsOnly, entityType, v_entityAttribute, v_sortingField, sortingDir, limitVal, offsetVal); END; $$ LANGUAGE plpgsql; diff --git a/mod-source-record-manager-server/src/main/resources/templates/db_scripts/create_incoming_records_table.sql b/mod-source-record-manager-server/src/main/resources/templates/db_scripts/create_incoming_records_table.sql new file mode 100644 index 000000000..29632fb6e --- /dev/null +++ b/mod-source-record-manager-server/src/main/resources/templates/db_scripts/create_incoming_records_table.sql @@ -0,0 +1,10 @@ +CREATE TABLE IF NOT EXISTS incoming_records ( + id uuid NOT NULL, + job_execution_id uuid NOT NULL, + incoming_record jsonb NOT NULL, + CONSTRAINT incoming_records_pkey PRIMARY KEY (id), + CONSTRAINT incoming_records_jobexecutionid_fkey FOREIGN KEY (job_execution_id) + REFERENCES job_execution (id) +); + +CREATE INDEX IF NOT EXISTS incoming_records_jobexecutionid_index ON incoming_records USING BTREE (job_execution_id); diff --git a/mod-source-record-manager-server/src/main/resources/templates/db_scripts/schema.json b/mod-source-record-manager-server/src/main/resources/templates/db_scripts/schema.json index 77521ca29..9db21d3cb 100644 --- a/mod-source-record-manager-server/src/main/resources/templates/db_scripts/schema.json +++ b/mod-source-record-manager-server/src/main/resources/templates/db_scripts/schema.json @@ -276,6 +276,16 @@ "run": "after", "snippet": "ALTER TABLE journal_records ADD COLUMN IF NOT EXISTS tenant_id text;", "fromModuleVersion": "mod-source-record-manager-3.7.0" + }, + { + "run": "after", + "snippetPath": "create_incoming_records_table.sql", + "fromModuleVersion": "mod-source-record-manager-3.8.0" + }, + { + "run": "after", + "snippetPath": "create_get_job_log_entries_function.sql", + "fromModuleVersion": "mod-source-record-manager-3.8.0" } ] } diff --git a/mod-source-record-manager-server/src/test/java/org/folio/dao/IncomingRecordDaoImplTest.java b/mod-source-record-manager-server/src/test/java/org/folio/dao/IncomingRecordDaoImplTest.java new file mode 100644 index 000000000..132c18da3 --- /dev/null +++ b/mod-source-record-manager-server/src/test/java/org/folio/dao/IncomingRecordDaoImplTest.java @@ -0,0 +1,88 @@ +package org.folio.dao; + +import io.vertx.core.Vertx; +import io.vertx.ext.unit.Async; +import io.vertx.ext.unit.TestContext; +import io.vertx.ext.unit.junit.VertxUnitRunner; +import org.folio.dao.util.PostgresClientFactory; +import org.folio.rest.impl.AbstractRestTest; +import org.folio.rest.jaxrs.model.IncomingRecord; +import org.folio.rest.jaxrs.model.JobExecution; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.MockitoAnnotations; +import org.mockito.Spy; + +import java.io.IOException; +import java.util.List; +import java.util.UUID; + 
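+// DAO-level round trip for the new incoming_records table (schema in
+// create_incoming_records_table.sql above): saveBatch persists IncomingRecord
+// entities per job execution and getById reads one back as an Optional.
+// Minimal usage sketch mirroring the tests below:
+//   incomingRecordDao.saveBatch(List.of(incomingRecord), TENANT_ID)
+//     .compose(saved -> incomingRecordDao.getById(incomingRecord.getId(), TENANT_ID));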
+@RunWith(VertxUnitRunner.class) +public class IncomingRecordDaoImplTest extends AbstractRestTest { + + private static final String TENANT_ID = "diku"; + + @Spy + private PostgresClientFactory postgresClientFactory = new PostgresClientFactory(Vertx.vertx()); + @InjectMocks + private IncomingRecordDao incomingRecordDao = new IncomingRecordDaoImpl(); + + @Before + public void setUp(TestContext context) throws IOException { + MockitoAnnotations.openMocks(this); + super.setUp(context); + } + + @Test + public void shouldGetById(TestContext context) { + Async async = context.async(); + + List createdJobExecutions = constructAndPostInitJobExecutionRqDto(1).getJobExecutions(); + String jobExecutionId = createdJobExecutions.get(0).getId(); + + String id = UUID.randomUUID().toString(); + IncomingRecord incomingRecord = buildIncomingRecord(id, jobExecutionId); + + incomingRecordDao.saveBatch(List.of(incomingRecord), TENANT_ID) + .compose(r -> + incomingRecordDao.getById(id, TENANT_ID) + .onComplete(ar -> { + context.assertTrue(ar.succeeded()); + context.assertTrue(ar.result().isPresent()); + IncomingRecord result = ar.result().get(); + context.assertEquals(id, result.getId()); + context.assertEquals(jobExecutionId, result.getJobExecutionId()); + context.assertEquals("rawRecord", result.getRawRecordContent()); + context.assertEquals("parsedRecord", result.getParsedRecordContent()); + async.complete(); + })); + } + + @Test + public void shouldSaveBatch(TestContext context) { + Async async = context.async(); + + List createdJobExecutions = constructAndPostInitJobExecutionRqDto(1).getJobExecutions(); + String jobExecutionId = createdJobExecutions.get(0).getId(); + + String id1 = UUID.randomUUID().toString(); + String id2 = UUID.randomUUID().toString(); + IncomingRecord incomingRecord1 = buildIncomingRecord(id1, jobExecutionId); + IncomingRecord incomingRecord2 = buildIncomingRecord(id2, jobExecutionId); + + incomingRecordDao.saveBatch(List.of(incomingRecord1, incomingRecord2), TENANT_ID) + .onComplete(ar -> { + context.assertTrue(ar.succeeded()); + context.assertEquals(2, ar.result().size()); + async.complete(); + }); + } + + private static IncomingRecord buildIncomingRecord(String id, String jobExecutionId) { + return new IncomingRecord() + .withId(id).withJobExecutionId(jobExecutionId).withRecordType(IncomingRecord.RecordType.MARC_BIB).withOrder(0) + .withRawRecordContent("rawRecord").withParsedRecordContent("parsedRecord"); + } +} diff --git a/mod-source-record-manager-server/src/test/java/org/folio/rest/impl/AbstractRestTest.java b/mod-source-record-manager-server/src/test/java/org/folio/rest/impl/AbstractRestTest.java index 9443094af..f2b3cea1a 100644 --- a/mod-source-record-manager-server/src/test/java/org/folio/rest/impl/AbstractRestTest.java +++ b/mod-source-record-manager-server/src/test/java/org/folio/rest/impl/AbstractRestTest.java @@ -5,6 +5,7 @@ import static com.github.tomakehurst.wiremock.client.WireMock.post; import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith; import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.defaultClusterConfig; +import static org.folio.dao.IncomingRecordDaoImpl.INCOMING_RECORDS_TABLE; import static org.folio.dataimport.util.RestUtil.OKAPI_TENANT_HEADER; import static org.folio.dataimport.util.RestUtil.OKAPI_URL_HEADER; import static org.folio.kafka.KafkaTopicNameHelper.getDefaultNameSpace; @@ -520,13 +521,14 @@ private void clearTable(TestContext context) { PostgresClient pgClient = PostgresClient.getInstance(vertx, TENANT_ID); 
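    // Cleanup now also covers incoming_records; tables referencing job_execution are
    // cleared before job_executions itself, matching the foreign key introduced in
    // create_incoming_records_table.sql.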
pgClient.delete(CHUNKS_TABLE_NAME, new Criterion(), event1 -> pgClient.delete(JOURNAL_RECORDS_TABLE, new Criterion(), event2 -> - pgClient.delete(JOB_EXECUTION_PROGRESS_TABLE, new Criterion(), event3 -> - pgClient.delete(JOB_EXECUTIONS_TABLE_NAME, new Criterion(), event4 -> { - if (event3.failed()) { - context.fail(event3.cause()); - } - async.complete(); - })))); + pgClient.delete(INCOMING_RECORDS_TABLE, new Criterion(), event3 -> + pgClient.delete(JOB_EXECUTION_PROGRESS_TABLE, new Criterion(), event4 -> + pgClient.delete(JOB_EXECUTIONS_TABLE_NAME, new Criterion(), event5 -> { + if (event4.failed()) { + context.fail(event4.cause()); + } + async.complete(); + }))))); } protected InitJobExecutionsRsDto constructAndPostInitJobExecutionRqDto(int filesNumber) { diff --git a/mod-source-record-manager-server/src/test/java/org/folio/rest/impl/changeManager/ChangeManagerAPITest.java b/mod-source-record-manager-server/src/test/java/org/folio/rest/impl/changeManager/ChangeManagerAPITest.java index ac466344d..1e1bd3f0c 100644 --- a/mod-source-record-manager-server/src/test/java/org/folio/rest/impl/changeManager/ChangeManagerAPITest.java +++ b/mod-source-record-manager-server/src/test/java/org/folio/rest/impl/changeManager/ChangeManagerAPITest.java @@ -12,6 +12,8 @@ import static java.util.Arrays.asList; import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; +import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_INCOMING_MARC_BIB_RECORD_PARSED; +import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_ERROR; import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_MARC_FOR_UPDATE_RECEIVED; import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_RAW_RECORDS_CHUNK_PARSED; import static org.folio.rest.jaxrs.model.ProfileSnapshotWrapper.ContentType.ACTION_PROFILE; @@ -29,6 +31,10 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertNotNull; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; @@ -1784,21 +1790,17 @@ private void fillInRecordOrderIfAtLeastOneRecordHasNoOrder(String rawRecord) thr .then() .statusCode(HttpStatus.SC_NO_CONTENT); - String topicToObserve = formatToKafkaTopicName(DI_RAW_RECORDS_CHUNK_PARSED.value()); - List observedValues = kafkaCluster.observeValues(ObserveKeyValues.on(topicToObserve, 1) + String topicToObserve = formatToKafkaTopicName(DI_INCOMING_MARC_BIB_RECORD_PARSED.value()); + List observedValues = kafkaCluster.observeValues(ObserveKeyValues.on(topicToObserve, 3) .observeFor(30, TimeUnit.SECONDS) .build()); - Event obtainedEvent = Json.decodeValue(observedValues.get(0), Event.class); - assertEquals(DI_RAW_RECORDS_CHUNK_PARSED.value(), obtainedEvent.getEventType()); - - RecordCollection processedRecords = Json - .decodeValue(obtainedEvent.getEventPayload(), RecordCollection.class); - assertEquals(3, processedRecords.getRecords().size()); - - assertEquals(4, processedRecords.getRecords().get(0).getOrder().intValue()); - assertEquals(5, processedRecords.getRecords().get(1).getOrder().intValue()); - assertEquals(6, processedRecords.getRecords().get(2).getOrder().intValue()); + Event obtainedEvent = Json.decodeValue(observedValues.get(2), Event.class); + 
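+    // The chunk contains three records, so three per-record DI_INCOMING_MARC_BIB_RECORD_PARSED
+    // events are expected on the topic; the last observed event is the one inspected here.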
assertEquals(DI_INCOMING_MARC_BIB_RECORD_PARSED.value(), obtainedEvent.getEventType()); + DataImportEventPayload eventPayload = Json.decodeValue(obtainedEvent.getEventPayload(), DataImportEventPayload.class); + assertNotNull(eventPayload.getContext()); + JsonObject record = new JsonObject(eventPayload.getContext().get("MARC_BIBLIOGRAPHIC")); + assertNotEquals(0, record.getInteger("order").intValue()); } @Test @@ -2164,34 +2166,33 @@ public void shouldHaveErrorRecordIf999ffsFieldExistsAndCreateInstanceActionProfi .statusCode(HttpStatus.SC_NO_CONTENT); async.complete(); - String topicToObserve = formatToKafkaTopicName(DI_RAW_RECORDS_CHUNK_PARSED.value()); - List observedValues = kafkaCluster.observeValues(ObserveKeyValues.on(topicToObserve, 1) + String topicToObserve = formatToKafkaTopicName(DI_ERROR.value()); + List observedValues = kafkaCluster.observeValues(ObserveKeyValues.on(topicToObserve, 2) .observeFor(30, TimeUnit.SECONDS) .build()); - Event obtainedEvent = Json.decodeValue(observedValues.get(5), Event.class); - assertEquals(DI_RAW_RECORDS_CHUNK_PARSED.value(), obtainedEvent.getEventType()); - RecordCollection recordCollection = Json - .decodeValue(obtainedEvent.getEventPayload(), RecordCollection.class); - Assert.assertNull(recordCollection.getRecords().get(0).getMatchedId()); - Assert.assertNotNull(recordCollection.getRecords().get(0).getErrorRecord()); - Assert.assertEquals( - "{\"error\":\"A new Instance was not created because the incoming record already contained a 999ff$s or 999ff$i field\"}", - recordCollection.getRecords().get(0).getErrorRecord().getDescription()); + Event obtainedEvent = Json.decodeValue(observedValues.get(1), Event.class); + assertEquals(DI_ERROR.value(), obtainedEvent.getEventType()); + DataImportEventPayload eventPayload = Json.decodeValue(obtainedEvent.getEventPayload(), DataImportEventPayload.class); + + assertNotNull(eventPayload.getContext()); + JsonObject record = new JsonObject(eventPayload.getContext().get("MARC_BIBLIOGRAPHIC")); + assertNull(record.getString("matchedId")); + assertFalse(record.getJsonObject("errorRecord").isEmpty()); + assertEquals("A new Instance was not created because the incoming record already contained a 999ff$s or 999ff$i field", + new JsonObject(eventPayload.getContext().get("ERROR")).getString("error")); } @Test - public void shouldHaveErrorRecordIsNullIf999ffsFieldExistsAndCreateInstanceActionProfileWithNonMatch( - TestContext testContext) throws InterruptedException { + public void shouldHaveErrorRecordIsNullIf999ffsFieldExistsAndCreateInstanceActionProfileWithNonMatch(TestContext testContext) throws InterruptedException { InitJobExecutionsRsDto response = constructAndPostInitJobExecutionRqDto(1); List createdJobExecutions = response.getJobExecutions(); assertThat(createdJobExecutions.size(), is(1)); JobExecution jobExec = createdJobExecutions.get(0); - WireMock.stubFor( - WireMock.get("/data-import-profiles/jobProfiles/" + DEFAULT_INSTANCE_JOB_PROFILE_ID + "?withRelations=false&") - .willReturn(WireMock.ok().withBody(Json.encode(new JobProfile().withId(DEFAULT_INSTANCE_JOB_PROFILE_ID) - .withName("Default - Create instance and SRS MARC Bib"))))); + WireMock.stubFor(WireMock.get("/data-import-profiles/jobProfiles/" + DEFAULT_INSTANCE_JOB_PROFILE_ID + "?withRelations=false&") + .willReturn(WireMock.ok().withBody(Json.encode(new JobProfile().withId(DEFAULT_INSTANCE_JOB_PROFILE_ID) + .withName("Default - Create instance and SRS MARC Bib"))))); WireMock.stubFor(post(RECORDS_SERVICE_URL) 
.willReturn(created().withTransformers(RequestToResponseTransformer.NAME))); @@ -2223,15 +2224,16 @@ public void shouldHaveErrorRecordIsNullIf999ffsFieldExistsAndCreateInstanceActio .statusCode(HttpStatus.SC_NO_CONTENT); async.complete(); - String topicToObserve = formatToKafkaTopicName(DI_RAW_RECORDS_CHUNK_PARSED.value()); + String topicToObserve = formatToKafkaTopicName(DI_INCOMING_MARC_BIB_RECORD_PARSED.value()); List observedValues = kafkaCluster.observeValues( - ObserveKeyValues.on(topicToObserve, 1).observeFor(30, TimeUnit.SECONDS).build()); + ObserveKeyValues.on(topicToObserve, 57).observeFor(30, TimeUnit.SECONDS).build()); - Event obtainedEvent = Json.decodeValue(observedValues.get(0), Event.class); - assertEquals(DI_RAW_RECORDS_CHUNK_PARSED.value(), obtainedEvent.getEventType()); - RecordCollection recordCollection = Json.decodeValue(obtainedEvent.getEventPayload(), RecordCollection.class); - Assert.assertNull(recordCollection.getRecords().get(0).getMatchedId()); - Assert.assertNull(recordCollection.getRecords().get(0).getErrorRecord()); + Event obtainedEvent = Json.decodeValue(observedValues.get(56), Event.class); + assertEquals(DI_INCOMING_MARC_BIB_RECORD_PARSED.value(), obtainedEvent.getEventType()); + DataImportEventPayload eventPayload = Json.decodeValue(obtainedEvent.getEventPayload(), DataImportEventPayload.class); + JsonObject record = new JsonObject(eventPayload.getContext().get("MARC_BIBLIOGRAPHIC")); + assertNull(record.getString("matchedId")); + assertNull(record.getJsonObject("errorRecord")); } @Test @@ -2243,10 +2245,8 @@ public void shouldHaveErrorRecordIf999ffsFieldExistsAndCreateMarcAuthorityAction assertThat(createdJobExecutions.size(), is(1)); JobExecution jobExec = createdJobExecutions.get(0); - WireMock.stubFor(WireMock.get( - "/data-import-profiles/jobProfiles/" + DEFAULT_MARC_AUTHORITY_JOB_PROFILE_ID + "?withRelations=false&") - .willReturn(WireMock.ok().withBody(Json.encode(new JobProfile().withId(DEFAULT_MARC_AUTHORITY_JOB_PROFILE_ID) - .withName("Default - Create SRS MARC Authority"))))); + WireMock.stubFor(WireMock.get("/data-import-profiles/jobProfiles/" + DEFAULT_MARC_AUTHORITY_JOB_PROFILE_ID + "?withRelations=false&") + .willReturn(WireMock.ok().withBody(Json.encode(new JobProfile().withId(DEFAULT_MARC_AUTHORITY_JOB_PROFILE_ID).withName("Default - Create SRS MARC Authority"))))); WireMock.stubFor(post(RECORDS_SERVICE_URL) .willReturn(created().withTransformers(RequestToResponseTransformer.NAME))); @@ -2279,18 +2279,16 @@ public void shouldHaveErrorRecordIf999ffsFieldExistsAndCreateMarcAuthorityAction async.complete(); String topicToObserve = formatToKafkaTopicName(DI_RAW_RECORDS_CHUNK_PARSED.value()); - List observedValues = kafkaCluster.observeValues(ObserveKeyValues.on(topicToObserve, 3) + List observedValues = kafkaCluster.observeValues(ObserveKeyValues.on(topicToObserve, 1) .observeFor(30, TimeUnit.SECONDS) .build()); - Event obtainedEvent = Json.decodeValue(observedValues.get(3), Event.class); + Event obtainedEvent = Json.decodeValue(observedValues.get(0), Event.class); assertEquals(DI_RAW_RECORDS_CHUNK_PARSED.value(), obtainedEvent.getEventType()); - RecordCollection recordCollection = Json - .decodeValue(obtainedEvent.getEventPayload(), RecordCollection.class); - Assert.assertNull(recordCollection.getRecords().get(0).getMatchedId()); - Assert.assertNotNull(recordCollection.getRecords().get(0).getErrorRecord()); - Assert.assertEquals( - "{\"error\":\"A new MARC-Authority was not created because the incoming record already contained a 999ff$s or 
999ff$i field\"}", + RecordCollection recordCollection = Json.decodeValue(obtainedEvent.getEventPayload(), RecordCollection.class); + assertNull(recordCollection.getRecords().get(0).getMatchedId()); + assertNotNull(recordCollection.getRecords().get(0).getErrorRecord()); + assertEquals("{\"error\":\"A new MARC-Authority was not created because the incoming record already contained a 999ff$s or 999ff$i field\"}", recordCollection.getRecords().get(0).getErrorRecord().getDescription()); } @@ -2328,17 +2326,15 @@ public void shouldSetErrorToRecordWithInvalidLeaderLine(TestContext testContext) .statusCode(HttpStatus.SC_NO_CONTENT); async.complete(); - String topicToObserve = formatToKafkaTopicName(DI_RAW_RECORDS_CHUNK_PARSED.value()); - List observedValues = kafkaCluster.observeValues(ObserveKeyValues.on(topicToObserve, 3) - .observeFor(30, TimeUnit.SECONDS) - .build()); + String topicToObserve = formatToKafkaTopicName(DI_ERROR.value()); + List observedValues = kafkaCluster.observeValues(ObserveKeyValues.on(topicToObserve, 1) + .observeFor(30, TimeUnit.SECONDS).build()); - Event obtainedEvent = Json.decodeValue(observedValues.get(2), Event.class); - assertEquals(DI_RAW_RECORDS_CHUNK_PARSED.value(), obtainedEvent.getEventType()); - RecordCollection recordCollection = Json.decodeValue(obtainedEvent.getEventPayload(), RecordCollection.class); - assertEquals(1, recordCollection.getRecords().size()); - MatcherAssert.assertThat(recordCollection.getRecords().get(0).getErrorRecord().getDescription(), - containsString("Error during analyze leader line for determining record type")); + Event obtainedEvent = Json.decodeValue(observedValues.get(0), Event.class); + assertEquals(DI_ERROR.value(), obtainedEvent.getEventType()); + DataImportEventPayload eventPayload = Json.decodeValue(obtainedEvent.getEventPayload(), DataImportEventPayload.class); + MatcherAssert.assertThat(new JsonObject(eventPayload.getContext().get("ERROR")).getString("message"), + containsString("Error during analyze leader line for determining record type for record with id")); } @Test @@ -2350,10 +2346,8 @@ public void shouldHaveErrorRecordIf999ffsFieldExistsAndCreateMarcHoldingsActionP assertThat(createdJobExecutions.size(), is(1)); JobExecution jobExec = createdJobExecutions.get(0); - WireMock.stubFor(WireMock.get( - "/data-import-profiles/jobProfiles/" + DEFAULT_MARC_HOLDINGS_JOB_PROFILE_ID + "?withRelations=false&") - .willReturn(WireMock.ok().withBody(Json.encode(new JobProfile().withId(DEFAULT_MARC_HOLDINGS_JOB_PROFILE_ID) - .withName("Default - Create Holdings and SRS MARC Holdings"))))); + WireMock.stubFor(WireMock.get("/data-import-profiles/jobProfiles/" + DEFAULT_MARC_HOLDINGS_JOB_PROFILE_ID + "?withRelations=false&") + .willReturn(WireMock.ok().withBody(Json.encode(new JobProfile().withId(DEFAULT_MARC_HOLDINGS_JOB_PROFILE_ID).withName("Default - Create Holdings and SRS MARC Holdings"))))); WireMock.stubFor(post(RECORDS_SERVICE_URL) .willReturn(created().withTransformers(RequestToResponseTransformer.NAME))); @@ -2386,18 +2380,16 @@ public void shouldHaveErrorRecordIf999ffsFieldExistsAndCreateMarcHoldingsActionP async.complete(); String topicToObserve = formatToKafkaTopicName(DI_RAW_RECORDS_CHUNK_PARSED.value()); - List observedValues = kafkaCluster.observeValues(ObserveKeyValues.on(topicToObserve, 7) + List observedValues = kafkaCluster.observeValues(ObserveKeyValues.on(topicToObserve, 2) .observeFor(30, TimeUnit.SECONDS) .build()); - Event obtainedEvent = Json.decodeValue(observedValues.get(7), Event.class); + Event 
obtainedEvent = Json.decodeValue(observedValues.get(1), Event.class); assertEquals(DI_RAW_RECORDS_CHUNK_PARSED.value(), obtainedEvent.getEventType()); - RecordCollection recordCollection = Json - .decodeValue(obtainedEvent.getEventPayload(), RecordCollection.class); - Assert.assertNull(recordCollection.getRecords().get(0).getMatchedId()); - Assert.assertNotNull(recordCollection.getRecords().get(0).getErrorRecord()); - Assert.assertEquals( - "{\"error\":\"A new MARC-Holding was not created because the incoming record already contained a 999ff$s or 999ff$i field\"}", + RecordCollection recordCollection = Json.decodeValue(obtainedEvent.getEventPayload(), RecordCollection.class); + assertNull(recordCollection.getRecords().get(0).getMatchedId()); + assertNotNull(recordCollection.getRecords().get(0).getErrorRecord()); + assertEquals("{\"error\":\"A new MARC-Holding was not created because the incoming record already contained a 999ff$s or 999ff$i field\"}", recordCollection.getRecords().get(0).getErrorRecord().getDescription()); } diff --git a/mod-source-record-manager-server/src/test/java/org/folio/rest/impl/metadataProvider/MetaDataProviderJobLogEntriesAPITest.java b/mod-source-record-manager-server/src/test/java/org/folio/rest/impl/metadataProvider/MetaDataProviderJobLogEntriesAPITest.java index 35b0530db..3e52ae718 100644 --- a/mod-source-record-manager-server/src/test/java/org/folio/rest/impl/metadataProvider/MetaDataProviderJobLogEntriesAPITest.java +++ b/mod-source-record-manager-server/src/test/java/org/folio/rest/impl/metadataProvider/MetaDataProviderJobLogEntriesAPITest.java @@ -26,12 +26,14 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.oneOf; import java.util.Date; import java.util.List; import java.util.UUID; import java.util.stream.IntStream; +import com.google.common.collect.Lists; import org.apache.http.HttpStatus; import org.folio.dao.JournalRecordDaoImpl; import org.folio.dao.util.PostgresClientFactory; @@ -39,9 +41,9 @@ import org.folio.rest.impl.AbstractRestTest; import org.folio.rest.jaxrs.model.ActionStatus; import org.folio.rest.jaxrs.model.JobExecution; -import org.folio.rest.jaxrs.model.JobLogEntryDto; -import org.folio.rest.jaxrs.model.JobLogEntryDtoCollection; import org.folio.rest.jaxrs.model.JournalRecord; +import org.folio.rest.jaxrs.model.RecordProcessingLogDto; +import org.folio.rest.jaxrs.model.RecordProcessingLogDtoCollection; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -77,314 +79,6 @@ public void setUp() { MockitoAnnotations.initMocks(this); } - @Test - public void shouldReturnEmptyListOnGetIfHasNoLogRecordsBySpecifiedJobId() { - RestAssured.given() - .spec(spec) - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + UUID.randomUUID().toString()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries", empty()) - .body("totalRecords", is(0)); - } - - @Test - public void shouldReturnMarcBibUpdatedWhenMarcBibWasUpdated(TestContext context) { - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - String sourceRecordId = UUID.randomUUID().toString(); - String recordTitle = "test title"; - - Future future = Future.succeededFuture() - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> 
createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, "in00000000001", null, 0, UPDATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - RestAssured.given() - .spec(spec) - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries", hasSize(1)) - .body("totalRecords", is(1)) - .body("entries[0].jobExecutionId", is(createdJobExecution.getId())) - .body("entries[0].sourceRecordId", is(sourceRecordId)) - .body("entries[0].sourceRecordTitle", is(recordTitle)) - .body("entries[0].sourceRecordActionStatus", is(ActionStatus.CREATED.value())); - - async.complete(); - })); - } - - @Test - public void shouldReturnOneEntryIfTwoErrorsDuringMultipleCreation(TestContext context) { - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - String sourceRecordId = UUID.randomUUID().toString(); - String recordTitle = "test title"; - - String instanceId = UUID.randomUUID().toString(); - String instanceHrid = "i001"; - - String[] holdingsId = generateRandomUUIDs(3); - String[] holdingsHrid = {"h001","h002","h003"}; - - String[] permanentLocation = {UUID.randomUUID().toString()}; - - String errorMsg1 = "test error1"; - String errorMsg2 = "test error2"; - String errorArray = "[test error1, test error2]"; - - Future future = Future.succeededFuture() - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, instanceId, instanceHrid, null, 0, CREATE, INSTANCE, COMPLETED, null, null)) - .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, holdingsId[0], holdingsHrid[0], null, 0, CREATE, HOLDINGS, COMPLETED, null, null,instanceId,null, permanentLocation[0])) - .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, holdingsId[1],null, null, 0, CREATE, HOLDINGS, ERROR, errorMsg1, null,null,null, null)) - .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, holdingsId[2], null, null, 0, CREATE, HOLDINGS, ERROR, errorMsg2, null,null,null, null)) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - RestAssured.given() - .spec(spec) - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .log().all() - .body("entries", hasSize(1)) - .body("totalRecords", is(1)) - .body("entries[0].jobExecutionId", is(createdJobExecution.getId())) - .body("entries[0].sourceRecordId", is(sourceRecordId)) - .body("entries[0].sourceRecordTitle", is(recordTitle)) - .body("entries[0].sourceRecordActionStatus", is(ActionStatus.CREATED.value())) - .body("entries[0].holdingsActionStatus", is(ActionStatus.DISCARDED.value())) - .body("entries[0].error", is(errorArray)); - async.complete(); - })); - } - - @Test - public void shouldReturnMarcBibUpdatedWhenMarcBibWasModified(TestContext context) { - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - String sourceRecordId = UUID.randomUUID().toString(); - String recordTitle = "test title"; - - Future future = Future.succeededFuture() - .compose(v -> 
createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle,0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, null, 0, MODIFY, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - RestAssured.given() - .spec(spec) - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries", hasSize(1)) - .body("totalRecords", is(1)) - .body("entries[0].jobExecutionId", is(createdJobExecution.getId())) - .body("entries[0].sourceRecordId", is(sourceRecordId)) - .body("entries[0].sourceRecordTitle", is(recordTitle)) - .body("entries[0].sourceRecordActionStatus", is(ActionStatus.CREATED.value())); - - async.complete(); - })); - } - - @Test - public void shouldReturnMarcBibCreatedWhenMarcBibWasCreatedInNonMatchSection(TestContext context) { - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - String sourceRecordId = UUID.randomUUID().toString(); - String recordTitle = "test title"; - - Future future = Future.succeededFuture() - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle,0, NON_MATCH, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - RestAssured.given() - .spec(spec) - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries", hasSize(1)) - .body("totalRecords", is(1)) - .body("entries[0].jobExecutionId", is(createdJobExecution.getId())) - .body("entries[0].sourceRecordId", is(sourceRecordId)) - .body("entries[0].sourceRecordTitle", is(recordTitle)) - .body("entries[0].sourceRecordActionStatus", is(ActionStatus.CREATED.value())); - - async.complete(); - })); - } - - @Test - public void shouldReturnInstanceDiscardedWhenInstanceWasNotMatched(TestContext context) { - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - String sourceRecordId = UUID.randomUUID().toString(); - String recordTitle = "test title"; - - - Future future = Future.succeededFuture() - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, "in00000000001", null, 0, NON_MATCH, INSTANCE, COMPLETED, null, null)) - .onSuccess(v -> async.complete()) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - RestAssured.given() - .spec(spec) - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries.size()", is(1)) - .body("totalRecords", is(1)) - .body("entries[0].jobExecutionId", is(createdJobExecution.getId())) - .body("entries[0].sourceRecordId", is(sourceRecordId)) - .body("entries[0].sourceRecordTitle", is(recordTitle)) - .body("entries[0].sourceRecordActionStatus", is(ActionStatus.CREATED.value())) - 
.body("entries[0].instanceActionStatus", is(ActionStatus.DISCARDED.value())) - .body("entries[0].error", emptyOrNullString()); - - async.complete(); - })); - } - - @Test - public void shouldReturnInstanceDiscardedWhenInstanceCreationFailed(TestContext context) { - //test - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - String sourceRecordId = UUID.randomUUID().toString(); - String recordTitle = "test title"; - - Future future = Future.succeededFuture() - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, null, 0, CREATE, INSTANCE, ERROR, "error msg", null)) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - RestAssured.given() - .spec(spec) - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries.size()", is(1)) - .body("totalRecords", is(1)) - .body("entries[0].jobExecutionId", is(createdJobExecution.getId())) - .body("entries[0].sourceRecordId", is(sourceRecordId)) - .body("entries[0].sourceRecordTitle", is(recordTitle)) - .body("entries[0].instanceActionStatus", is(ActionStatus.DISCARDED.value())) - .body("entries[0].error", not(emptyOrNullString())); - - async.complete(); - })); - } - - @Test - public void shouldReturnInstanceCreatedWhenMarcModify(TestContext context) { - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - String sourceRecordId = UUID.randomUUID().toString(); - String recordTitle = "test title"; - - Future future = Future.succeededFuture() - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, "marcEntityID", null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, "marcEntityID", null, recordTitle, 0, MODIFY, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, null, 0, UPDATE, INSTANCE, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, "instanceEntityID", "in00000000001", null, 0, CREATE, INSTANCE, COMPLETED, null, null)) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - RestAssured.given() - .spec(spec) - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries.size()", is(1)) - .body("totalRecords", is(1)) - .body("entries[0].jobExecutionId", is(createdJobExecution.getId())) - .body("entries[0].sourceRecordId", is(sourceRecordId)) - .body("entries[0].sourceRecordTitle", is(recordTitle)) - .body("entries[0].instanceActionStatus", is(ActionStatus.CREATED.value())); - - async.complete(); - })); - } - - @Test - public void shouldReturnPoLineCreatedWhenMarcCreate(TestContext context) { - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - String sourceRecordId = UUID.randomUUID().toString(); - String recordTitle = "test title"; - - Future future = Future.succeededFuture() - .compose(v -> 
createJournalRecord(createdJobExecution.getId(), sourceRecordId, "marcEntityID", null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, "poLineEntityID", null, null, 0, CREATE, PO_LINE, COMPLETED, null, null)) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - RestAssured.given() - .spec(spec) - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries.size()", is(1)) - .body("totalRecords", is(1)) - .body("entries[0].jobExecutionId", is(createdJobExecution.getId())) - .body("entries[0].sourceRecordId", is(sourceRecordId)) - .body("entries[0].sourceRecordTitle", is(recordTitle)) - .body("entries[0].poLineActionStatus", is(ActionStatus.CREATED.value())); - - async.complete(); - })); - } - - @Test - public void shouldReturnAuthorityDiscardedWhenErrorOnMatch(TestContext context) { - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - String sourceRecordId = UUID.randomUUID().toString(); - String recordTitle = "test title"; - - Future future = Future.succeededFuture() - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, "authorityEntityID", null, recordTitle, 0, MATCH, MARC_AUTHORITY, ERROR, "errorMsg", null)) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - RestAssured.given() - .spec(spec) - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries.size()", is(1)) - .body("totalRecords", is(1)) - .body("entries[0].jobExecutionId", is(createdJobExecution.getId())) - .body("entries[0].sourceRecordId", is(sourceRecordId)) - .body("entries[0].sourceRecordType", is(MARC_AUTHORITY.value())) - .body("entries[0].sourceRecordTitle", is(recordTitle)) - .body("entries[0].error", is(notNullValue())) - .body("entries[0].sourceRecordActionStatus", is(ActionStatus.DISCARDED.value())); - - async.complete(); - })); - } - @Test public void shouldReturnInstanceIdWhenHoldingsCreated(TestContext context) { Async async = context.async(); @@ -425,7 +119,7 @@ public void shouldReturnInstanceIdWhenHoldingsCreated(TestContext context) { .statusCode(HttpStatus.SC_OK) .body("jobExecutionId", is(holdingsCreationJobExecution.getId())) .body("sourceRecordId", is(holdingsCreationSourceRecordId)) - .body("sourceRecordOrder", is(0)) + .body("sourceRecordOrder", is("0")) .body("error", emptyOrNullString()) .body("relatedInstanceInfo.idList[0]", is("instanceEntityID")) .body("relatedInstanceInfo.error", emptyOrNullString()) @@ -502,184 +196,6 @@ public void shouldReturnOneInstanceIdWhenMarcBibUpdatedAndInstanceUpdated(TestCo })); } - @Test - public void shouldReturnHoldingsMultipleWhenMultipleHoldingsWereProcessed(TestContext context) { - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - String sourceRecordId = UUID.randomUUID().toString(); - String recordTitle = "test title"; - - Future future = Future.succeededFuture() - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, null, 0, CREATE, 
HOLDINGS, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null,null, 0, UPDATE, HOLDINGS, COMPLETED, null, null)) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - RestAssured.given() - .spec(spec) - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries.size()", is(1)) - .body("totalRecords", is(1)) - .body("entries[0].sourceRecordId", is(sourceRecordId)) - .body("entries[0].sourceRecordTitle", is(recordTitle)) - .body("entries[0].holdingsActionStatus", is(ActionStatus.CREATED.value())); - - async.complete(); - })); - } - - @Test - public void shouldReturnHoldingsTitleWithHoldingsHrid(TestContext context) { - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - String sourceRecordId = UUID.randomUUID().toString(); - - Future future = Future.succeededFuture() - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, null, 0, CREATE, MARC_HOLDINGS, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, "ho00000000001", null, 0, CREATE, HOLDINGS, COMPLETED, null, null)) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - RestAssured.given() - .spec(spec) - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries.size()", is(1)) - .body("totalRecords", is(1)) - .body("entries[0].sourceRecordId", is(sourceRecordId)) - .body("entries[0].sourceRecordTitle", is("Holdings ho00000000001")) - .body("entries[0].holdingsRecordHridList[0]", is("ho00000000001")) - .body("entries[0].sourceRecordType", is(MARC_HOLDINGS.value())); - - async.complete(); - })); - } - - @Test - public void shouldReturnSortedEntriesWhenSortByParameterSpecified(TestContext context) { - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - String sourceRecordId1 = UUID.randomUUID().toString(); - String sourceRecordId2 = UUID.randomUUID().toString(); - String sourceRecordId3 = UUID.randomUUID().toString(); - - Future future = Future.succeededFuture() - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId1, null, null, null, 1, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId1, null, "in00000000002", null, 1, CREATE, INSTANCE, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId2, null, null, null, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId2, null, "in00000000001", null, 0, CREATE, INSTANCE, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId3, null, null, null, 3, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId3, null, "in00000000003", null, 3, CREATE, INSTANCE, COMPLETED, null, null)) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - List jobLogEntries = RestAssured.given() - .spec(spec) - .queryParam("sortBy", "source_record_order") - .queryParam("order", 
"desc") - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries", hasSize(3)) - .body("totalRecords", is(3)) - .extract().body().as(JobLogEntryDtoCollection.class).getEntries(); - - context.assertTrue(Integer.parseInt(jobLogEntries.get(0).getSourceRecordOrder()) > Integer.parseInt(jobLogEntries.get(1).getSourceRecordOrder())); - context.assertTrue(Integer.parseInt(jobLogEntries.get(1).getSourceRecordOrder()) > Integer.parseInt(jobLogEntries.get(2).getSourceRecordOrder())); - async.complete(); - })); - } - - @Test - public void shouldReturnBadRequestOnGetWhenInvalidSortingFieldIsSpecified() { - RestAssured.given() - .spec(spec) - .queryParam("sortBy", "invalid_field") - .queryParam("order", "asc") - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + UUID.randomUUID()) - .then() - .statusCode(HttpStatus.SC_BAD_REQUEST); - } - - @Test - public void shouldReturnLimitedCollectionOnGetWithLimitAndOffset(TestContext context) { - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - String sourceRecordId1 = UUID.randomUUID().toString(); - String sourceRecordId2 = UUID.randomUUID().toString(); - String sourceRecordId3 = UUID.randomUUID().toString(); - String recordTitle1 = "title1"; - - Future future = Future.succeededFuture() - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId1, null, null, recordTitle1, 1, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId1, null, "in00000000001", null, 1, CREATE, INSTANCE, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId2, null, null, "title0", 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId2, null, null, null, 0, CREATE, INSTANCE, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId3, null, null, "title3", 3, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId3, null, null, null, 3, CREATE, INSTANCE, COMPLETED, null, null)) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - RestAssured.given() - .spec(spec) - .queryParam("sortBy", "source_record_order") - .queryParam("order", "desc") - .queryParam("limit", "1") - .queryParam("offset", "1") - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries.size()", is(1)) - .body("totalRecords", is(3)) - .body("entries[0].jobExecutionId", is(createdJobExecution.getId())) - .body("entries[0].sourceRecordId", is(sourceRecordId1)) - .body("entries[0].sourceRecordTitle", is(recordTitle1)) - .body("entries[0].sourceRecordOrder", is("1")) - .body("entries[0].holdingsRecordHridList", is(empty())) - .body("entries[0].sourceRecordType", is(MARC_BIBLIOGRAPHIC.value())); - - async.complete(); - })); - } - - @Test - public void shouldReturnAuthorityCreated(TestContext context) { - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - String sourceRecordId = UUID.randomUUID().toString(); - String recordTitle = "test title"; - - Future future = Future.succeededFuture() 
- .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_AUTHORITY, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, null, 0, CREATE, AUTHORITY, COMPLETED, null, null)) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - RestAssured.given() - .spec(spec) - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries.size()", is(1)) - .body("totalRecords", is(1)) - .body("entries[0].sourceRecordId", is(sourceRecordId)) - .body("entries[0].sourceRecordTitle", is(recordTitle)) - .body("entries[0].sourceRecordActionStatus", is(ActionStatus.CREATED.value())) - .body("entries[0].authorityActionStatus", is(ActionStatus.CREATED.value())); - async.complete(); - })); - } - @Test public void shouldReturnEmptyDtoIfHasNoLogRecordsBySpecifiedJobIdAndRecordId() { RestAssured.given() @@ -712,7 +228,7 @@ public void shouldReturnMarcBibUpdatedByJobAndRecordIds(TestContext context) { .body("jobExecutionId", is(createdJobExecution.getId())) .body("sourceRecordId", is(sourceRecordId)) .body("sourceRecordTitle", is(recordTitle)) - .body("sourceRecordOrder", is(0)) + .body("sourceRecordOrder", is("0")) .body("sourceRecordActionStatus", is(ActionStatus.CREATED.value())); async.complete(); @@ -743,7 +259,7 @@ public void shouldReturnEmptyMarcBibErrorAndInstanceDiscardedWhenInstanceCreatio .body("jobExecutionId", is(createdJobExecution.getId())) .body("sourceRecordId", is(sourceRecordId)) .body("sourceRecordTitle", is(recordTitle)) - .body("sourceRecordOrder", is(0)) + .body("sourceRecordOrder", is("0")) .body("error", emptyOrNullString()) .body("relatedInstanceInfo.idList[0]", is(entityId)) .body("relatedInstanceInfo.hridList[0]", is(entityHrid)) @@ -774,7 +290,7 @@ public void shouldReturnNotEmptyMarcBibErrorWhenMarcBibFailed(TestContext contex .body("jobExecutionId", is(createdJobExecution.getId())) .body("sourceRecordId", is(sourceRecordId)) .body("sourceRecordTitle", is(recordTitle)) - .body("sourceRecordOrder", is(0)) + .body("sourceRecordOrder", is("0")) .body("error", is("MarcBib error msg")); async.complete(); @@ -818,7 +334,7 @@ public void shouldReturnMarcBibAndAllEntitiesWithoutErrors(TestContext context) .body("jobExecutionId", is(createdJobExecution.getId())) .body("sourceRecordId", is(sourceRecordId)) .body("sourceRecordTitle", is(recordTitle)) - .body("sourceRecordOrder", is(0)) + .body("sourceRecordOrder", is("0")) .body("error", emptyOrNullString()) .body("relatedInstanceInfo.idList[0]", is(instanceId)) .body("relatedInstanceInfo.hridList[0]", is(instanceHrid)) @@ -867,7 +383,7 @@ public void shouldReturnDiscardedForHoldingsIfNoHoldingsCreated(TestContext cont .body("jobExecutionId", is(createdJobExecution.getId())) .body("sourceRecordId", is(sourceRecordId)) .body("sourceRecordTitle", is(recordTitle)) - .body("sourceRecordOrder", is(0)) + .body("sourceRecordOrder", is("0")) .body("error", emptyOrNullString()) .body("relatedInstanceInfo.idList[0]", is(instanceId)) .body("relatedInstanceInfo.hridList[0]", is(instanceHrid)) @@ -911,7 +427,7 @@ public void shouldReturnDiscardedForItemsIfNoItemsCreated(TestContext context) { .body("jobExecutionId", is(createdJobExecution.getId())) .body("sourceRecordId", is(sourceRecordId)) .body("sourceRecordTitle", is(recordTitle)) - .body("sourceRecordOrder", is(0)) + .body("sourceRecordOrder", is("0")) .body("error", 
emptyOrNullString()) .body("relatedInstanceInfo.idList[0]", is(instanceId)) .body("relatedInstanceInfo.hridList[0]", is(instanceHrid)) @@ -957,7 +473,7 @@ public void shouldReturnDataForParticularInvoiceLine(TestContext context) { .statusCode(HttpStatus.SC_OK) .body("jobExecutionId", is(createdJobExecution.getId())) .body("sourceRecordId", is(sourceRecordId)) - .body("sourceRecordOrder", is(0)) + .body("sourceRecordOrder", is("0")) .body("sourceRecordTitle", is(invoiceLineDescription + "1")) .body("error", emptyOrNullString()) .body("relatedInstanceInfo.idList.size", empty()) @@ -1007,7 +523,7 @@ public void shouldReturnInvoiceLineInfoWithError(TestContext context) { .statusCode(HttpStatus.SC_OK) .body("jobExecutionId", is(createdJobExecution.getId())) .body("sourceRecordId", is(sourceRecordId)) - .body("sourceRecordOrder", is(0)) + .body("sourceRecordOrder", is("0")) .body("sourceRecordTitle", is(invoiceLineDescription + "2")) .body("error", emptyOrNullString()) .body("relatedInvoiceInfo.idList[0]", is(invoiceId)) @@ -1020,257 +536,7 @@ public void shouldReturnInvoiceLineInfoWithError(TestContext context) { })); } - @Test - public void shouldReturnNotEmptyListWithInvoicesLines(TestContext context) { - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - String sourceRecordId = UUID.randomUUID().toString(); - - String invoiceLineDescription = "Some description"; - String invoiceLineId = "0704159"; - - CompositeFuture future = GenericCompositeFuture.all(List.of( - createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, "228D126", "INVOICE", 0, CREATE, INVOICE, COMPLETED, null, null).map(JournalRecord::getId), - createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-1", invoiceLineDescription + "1", 1, CREATE, INVOICE, COMPLETED, null, null).map(JournalRecord::getId), - createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-2", invoiceLineDescription + "2", 2, CREATE, INVOICE, COMPLETED, null, null).map(JournalRecord::getId), - createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-3", invoiceLineDescription + "3", 3, CREATE, INVOICE, COMPLETED, null, null).map(JournalRecord::getId))) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - RestAssured.given() - .spec(spec) - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries.size()", is(3)) - .body("totalRecords", is(3)) - .body("entries*.jobExecutionId", everyItem(is(createdJobExecution.getId()))) - .body("entries*.sourceRecordId", everyItem(is(sourceRecordId))) - .body("entries[0].sourceRecordTitle", is(invoiceLineDescription + "1")) - .body("entries[1].sourceRecordTitle", is(invoiceLineDescription + "2")) - .body("entries[2].sourceRecordTitle", is(invoiceLineDescription + "3")) - .body("entries[0].sourceRecordOrder", is(invoiceLineId + "-1")) - .body("entries[1].sourceRecordOrder", is(invoiceLineId + "-2")) - .body("entries[2].sourceRecordOrder", is(invoiceLineId + "-3")) - // skip result at 0 index, since it is invoice related journal record id - .body("entries[0].invoiceLineJournalRecordId", is(future.resultAt(1).toString())) - .body("entries[1].invoiceLineJournalRecordId", is(future.resultAt(2).toString())) - .body("entries[2].invoiceLineJournalRecordId", is(future.resultAt(3).toString())); - - async.complete(); - 
})); - } - - @Test - public void shouldReturnNotEmptyListWithInvoicesLinesThatContainsError(TestContext context) { - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - String sourceRecordId = UUID.randomUUID().toString(); - - String invoiceLineDescription = "Some description"; - String invoiceLineId = "0704159"; - - Future future = Future.succeededFuture() - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, "228D126", "INVOICE", 0, CREATE, INVOICE, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-1", invoiceLineDescription + "1", 1, CREATE, INVOICE, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-2", invoiceLineDescription + "2", 2, CREATE, INVOICE, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-3", invoiceLineDescription + "3", 3, CREATE, INVOICE, ERROR, "Exception", null)) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - List jobLogEntries = RestAssured.given() - .spec(spec) - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries.size()", is(3)) - .body("totalRecords", is(3)) - .body("entries[0].jobExecutionId", is(createdJobExecution.getId())) - .body("entries[0].sourceRecordId", is(sourceRecordId)) - .body("entries[0].sourceRecordTitle", is(invoiceLineDescription + "1")) - .body("entries[0].sourceRecordOrder", is(invoiceLineId + "-1")) - .body("entries[2].sourceRecordTitle", is(invoiceLineDescription + "3")) - .body("entries[2].sourceRecordOrder", is(invoiceLineId + "-3")) - .extract().body().as(JobLogEntryDtoCollection.class).getEntries(); - - Assert.assertEquals("Exception", jobLogEntries.get(2).getError()); - Assert.assertEquals(ActionStatus.DISCARDED, jobLogEntries.get(2).getInvoiceActionStatus()); - - async.complete(); - })); - } - - @Test - public void shouldNotReturnMarcBibRecordsWhenInstanceDiscarderRetrievingWithErrorsOnlyParam(TestContext context) { - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - - String sourceRecordId1 = UUID.randomUUID().toString(); - String sourceRecordId2 = UUID.randomUUID().toString(); - String sourceRecordId3 = UUID.randomUUID().toString(); - - Future future = Future.succeededFuture() - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId1, null, null, null, 1, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId1, null, "in00000000002", null, 1, CREATE, INSTANCE, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId2, null, null, null, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId2, null, "in00000000001", null, 0, CREATE, INSTANCE, ERROR, "Error description 1", null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId3, null, null, null, 3, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, "", null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId3, null, "in00000000003", null, 3, CREATE, 
INSTANCE, ERROR, "Error description 2", null)) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - RestAssured.given() - .spec(spec) - .param("errorsOnly", true) - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries", hasSize(2)) - .body("totalRecords", is(2)) - .body("entries[0].error", is("Error description 1")) - .body("entries[1].error", is("Error description 2")) - .body("entries[0].sourceRecordOrder", is("0")) - .body("entries[1].sourceRecordOrder", is("3")); - - RestAssured.given() - .spec(spec) - .param("errorsOnly", true) - .param("entityType", "MARC") - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries", is(empty())) - .body("totalRecords", is(0)); - - async.complete(); - })); - } - - @Test - public void shouldReturnOnlyInvoiceLinesWithErrorWhenRetrieveWithErrorsOnlyParam(TestContext context) { - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - String sourceRecordId = UUID.randomUUID().toString(); - String invoiceLineDescription = "Some description"; - String invoiceLineId = "246816"; - - Future future = Future.succeededFuture() - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, "10001", "INVOICE", 0, CREATE, INVOICE, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-1", invoiceLineDescription + "1", 1, CREATE, INVOICE, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-2", invoiceLineDescription + "2", 2, CREATE, INVOICE, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-3", invoiceLineDescription + "3", 3, CREATE, INVOICE, ERROR, "Exception", null)) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - List jobLogEntries = RestAssured.given() - .spec(spec) - .when() - .param("errorsOnly", true) - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries.size()", is(1)) - .body("totalRecords", is(1)) - .body("entries[0].jobExecutionId", is(createdJobExecution.getId())) - .body("entries[0].sourceRecordId", is(sourceRecordId)) - .extract().body().as(JobLogEntryDtoCollection.class).getEntries(); - - Assert.assertEquals("Exception", jobLogEntries.get(0).getError()); - Assert.assertEquals(ActionStatus.DISCARDED, jobLogEntries.get(0).getInvoiceActionStatus()); - - async.complete(); - })); - } - - @Test - public void shouldReturnOnlyOneSummaryEntityWhenRetrieveUsingEntityTypeParamWithValueHoldings(TestContext context) { - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - - String sourceRecordId1 = UUID.randomUUID().toString(); - String sourceRecordId2 = UUID.randomUUID().toString(); - String sourceRecordId3 = UUID.randomUUID().toString(); - - Future future = Future.succeededFuture() - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId1, null, null, null, 1, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId1, 
null, "in00000000002", null, 1, CREATE, INSTANCE, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId1, null, "ho00000000002", null, 1, CREATE, HOLDINGS, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId2, null, null, null, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId2, null, "in00000000001", null, 0, CREATE, INSTANCE, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId3, null, null, null, 3, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, "", null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId3, null, "in00000000003", null, 3, CREATE, INSTANCE, COMPLETED, null, null)) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - RestAssured.given() - .spec(spec) - .param("entityType", INSTANCE.value()) - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries", hasSize(3)) - .body("totalRecords", is(3)); - - RestAssured.given() - .spec(spec) - .param("entityType", HOLDINGS.value()) - .when() - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries", hasSize(1)) - .body("totalRecords", is(1)); - - async.complete(); - })); - } - - @Test - public void shouldNotReturnWhenRetrieveFromJobWhichInitializedByInvoiceUsingEntityTypeParamWithValueMARC(TestContext context) { - Async async = context.async(); - JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); - String sourceRecordId = UUID.randomUUID().toString(); - String invoiceLineDescription = "Some description"; - String invoiceLineId = "246816"; - - Future future = Future.succeededFuture() - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, "10001", "INVOICE", 0, CREATE, INVOICE, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-1", invoiceLineDescription + "1", 1, CREATE, INVOICE, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-2", invoiceLineDescription + "2", 2, CREATE, INVOICE, COMPLETED, null, null)) - .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-3", invoiceLineDescription + "3", 3, CREATE, INVOICE, COMPLETED, null, null)) - .onFailure(context::fail); - - future.onComplete(ar -> context.verify(v -> { - RestAssured.given() - .spec(spec) - .when() - .param("entityType", "INVOICE") - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries.size()", is(3)) - .body("totalRecords", is(3)); - - RestAssured.given() - .spec(spec) - .when() - .param("entityType", "MARC") - .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) - .then() - .statusCode(HttpStatus.SC_OK) - .body("entries.size()", is(0)) - .body("totalRecords", is(0)); - - async.complete(); - })); - } - - @Test + @Test public void shouldReturnMarcBibAndAllEntitiesWithMultipleItemsHoldings(TestContext context) { Async async = context.async(); JobExecution createdJobExecution = 
constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); @@ -1314,7 +580,7 @@ public void shouldReturnMarcBibAndAllEntitiesWithMultipleItemsHoldings(TestConte .body("jobExecutionId", is(createdJobExecution.getId())) .body("sourceRecordId", is(sourceRecordId)) .body("sourceRecordTitle", is(recordTitle)) - .body("sourceRecordOrder", is(0)) + .body("sourceRecordOrder", is("0")) .body("error", emptyOrNullString()) .body("relatedInstanceInfo.idList[0]", is(instanceId)) .body("relatedInstanceInfo.hridList[0]", is(instanceHrid)) @@ -1394,7 +660,7 @@ public void shouldReturnMarcBibAndAllEntitiesWithItemsHoldingsWithoutDiscarded(T .body("jobExecutionId", is(createdJobExecution.getId())) .body("sourceRecordId", is(sourceRecordId)) .body("sourceRecordTitle", is(recordTitle)) - .body("sourceRecordOrder", is(0)) + .body("sourceRecordOrder", is("0")) .body("error", emptyOrNullString()) .body("relatedInstanceInfo.idList[0]", is(instanceId)) .body("relatedInstanceInfo.hridList[0]", is(instanceHrid)) @@ -1448,7 +714,7 @@ public void shouldReturnMarcBibAndAllEntitiesWithDiscardedItemsHoldings(TestCont .body("jobExecutionId", is(createdJobExecution.getId())) .body("sourceRecordId", is(sourceRecordId)) .body("sourceRecordTitle", is(recordTitle)) - .body("sourceRecordOrder", is(0)) + .body("sourceRecordOrder", is("0")) .body("error", emptyOrNullString()) .body("relatedInstanceInfo.idList[0]", is(instanceId)) .body("relatedInstanceInfo.hridList[0]", is(instanceHrid)) @@ -1501,7 +767,7 @@ public void shouldReturnMarcBibAndAllEntitiesWithMultipleItemsUpdate(TestContext .body("jobExecutionId", is(createdJobExecution.getId())) .body("sourceRecordId", is(sourceRecordId)) .body("sourceRecordTitle", is(recordTitle)) - .body("sourceRecordOrder", is(0)) + .body("sourceRecordOrder", is("0")) .body("error", emptyOrNullString()) .body("relatedInstanceInfo.idList[0]", is(instanceId)) .body("relatedInstanceInfo.hridList[0]", is(instanceHrid)) @@ -1545,7 +811,7 @@ public void shouldReturnCentralTenantIdForMarcRecordAndInstanceIfItIsSavedInJour .body("jobExecutionId", is(createdJobExecution.getId())) .body("sourceRecordId", is(sourceRecordId)) .body("sourceRecordTitle", is(recordTitle)) - .body("sourceRecordOrder", is(0)) + .body("sourceRecordOrder", is("0")) .body("sourceRecordTenantId", is(expectedCentralTenantId)) .body("relatedInstanceInfo.idList[0]", is(instanceId)) .body("relatedInstanceInfo.tenantId", is(expectedCentralTenantId)) diff --git a/mod-source-record-manager-server/src/test/java/org/folio/rest/impl/metadataProvider/MetaDataProviderRecordProcessingLogCollectionAPITest.java b/mod-source-record-manager-server/src/test/java/org/folio/rest/impl/metadataProvider/MetaDataProviderRecordProcessingLogCollectionAPITest.java new file mode 100644 index 000000000..4f875dadc --- /dev/null +++ b/mod-source-record-manager-server/src/test/java/org/folio/rest/impl/metadataProvider/MetaDataProviderRecordProcessingLogCollectionAPITest.java @@ -0,0 +1,1630 @@ +package org.folio.rest.impl.metadataProvider; + +import io.restassured.RestAssured; +import io.vertx.core.CompositeFuture; +import io.vertx.core.Future; +import io.vertx.core.Vertx; +import io.vertx.ext.unit.Async; +import io.vertx.ext.unit.TestContext; +import io.vertx.ext.unit.junit.VertxUnitRunner; +import org.apache.http.HttpStatus; +import org.folio.dao.JournalRecordDaoImpl; +import org.folio.dao.util.PostgresClientFactory; +import org.folio.okapi.common.GenericCompositeFuture; +import org.folio.rest.impl.AbstractRestTest; +import 
org.folio.rest.jaxrs.model.ActionStatus; +import org.folio.rest.jaxrs.model.JobExecution; +import org.folio.rest.jaxrs.model.JournalRecord; +import org.folio.rest.jaxrs.model.RecordProcessingLogDto; +import org.folio.rest.jaxrs.model.RecordProcessingLogDtoCollection; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.MockitoAnnotations; +import org.mockito.Spy; + +import java.util.Date; +import java.util.List; +import java.util.UUID; +import java.util.stream.IntStream; + +import static org.folio.rest.jaxrs.model.JournalRecord.ActionStatus.COMPLETED; +import static org.folio.rest.jaxrs.model.JournalRecord.ActionStatus.ERROR; +import static org.folio.rest.jaxrs.model.JournalRecord.ActionType.CREATE; +import static org.folio.rest.jaxrs.model.JournalRecord.ActionType.MATCH; +import static org.folio.rest.jaxrs.model.JournalRecord.ActionType.MODIFY; +import static org.folio.rest.jaxrs.model.JournalRecord.ActionType.NON_MATCH; +import static org.folio.rest.jaxrs.model.JournalRecord.ActionType.UPDATE; +import static org.folio.rest.jaxrs.model.JournalRecord.EntityType.AUTHORITY; +import static org.folio.rest.jaxrs.model.JournalRecord.EntityType.EDIFACT; +import static org.folio.rest.jaxrs.model.JournalRecord.EntityType.HOLDINGS; +import static org.folio.rest.jaxrs.model.JournalRecord.EntityType.INSTANCE; +import static org.folio.rest.jaxrs.model.JournalRecord.EntityType.INVOICE; +import static org.folio.rest.jaxrs.model.JournalRecord.EntityType.ITEM; +import static org.folio.rest.jaxrs.model.JournalRecord.EntityType.MARC_AUTHORITY; +import static org.folio.rest.jaxrs.model.JournalRecord.EntityType.MARC_BIBLIOGRAPHIC; +import static org.folio.rest.jaxrs.model.JournalRecord.EntityType.MARC_HOLDINGS; +import static org.folio.rest.jaxrs.model.JournalRecord.EntityType.PO_LINE; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.emptyOrNullString; +import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.in; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.oneOf; + +@RunWith(VertxUnitRunner.class) +public class MetaDataProviderRecordProcessingLogCollectionAPITest extends AbstractRestTest { + + private static final String GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH = "/metadata-provider/jobLogEntries"; + @Spy + Vertx vertx = Vertx.vertx(); + @Spy + @InjectMocks + PostgresClientFactory clientFactory; + @Spy + @InjectMocks + private JournalRecordDaoImpl journalRecordDao; + + @Before + public void setUp() { + MockitoAnnotations.initMocks(this); + } + + @Test + public void shouldReturnEmptyListOnGetIfHasNoLogRecordsBySpecifiedJobId() { + RestAssured.given() + .spec(spec) + .when() + .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + UUID.randomUUID().toString()) + .then() + .statusCode(HttpStatus.SC_OK) + .body("entries", empty()) + .body("totalRecords", is(0)); + } + + @Test + public void shouldReturnMarcBibUpdatedWhenMarcBibWasUpdated(TestContext context) { + Async async = context.async(); + JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); + String sourceRecordId = UUID.randomUUID().toString(); + String recordTitle = "test title"; + + Future future = Future.succeededFuture() + 
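+ // chains two journal-record writes for the same source record: a CREATE for the MARC bib, then an UPDATE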
.compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, "in00000000001", null, 0, UPDATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) + .onFailure(context::fail); + + future.onComplete(ar -> context.verify(v -> { + RestAssured.given() + .spec(spec) + .when() + .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) + .then() + .statusCode(HttpStatus.SC_OK) + .body("entries", hasSize(1)) + .body("totalRecords", is(1)) + .body("entries[0].jobExecutionId", is(createdJobExecution.getId())) + .body("entries[0].sourceRecordId", is(sourceRecordId)) + .body("entries[0].sourceRecordTitle", is(recordTitle)) + .body("entries[0].sourceRecordActionStatus", is(ActionStatus.CREATED.value())); + + async.complete(); + })); + } + + @Test + public void shouldReturnOneEntryIfWithAllMultipleHoldingsTwoErrorsDuringMultipleCreation(TestContext context) { + Async async = context.async(); + JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); + String sourceRecordId = UUID.randomUUID().toString(); + String recordTitle = "test title"; + + String instanceId = UUID.randomUUID().toString(); + String instanceHrid = "i001"; + + String[] holdingsId = generateRandomUUIDs(3); + String[] holdingsHrid = {"h001", "h002", "h003"}; + + String[] permanentLocation = {UUID.randomUUID().toString()}; + + String errorMsg1 = "test error1"; + String errorMsg2 = "test error2"; + + Future future = Future.succeededFuture() + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, instanceId, instanceHrid, null, 0, CREATE, INSTANCE, COMPLETED, null, null)) + .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, holdingsId[0], holdingsHrid[0], null, 0, CREATE, HOLDINGS, COMPLETED, null, null, instanceId, null, permanentLocation[0])) + .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, holdingsId[1], null, null, 0, CREATE, HOLDINGS, ERROR, errorMsg1, null, null, null, null)) + .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, holdingsId[2], null, null, 0, CREATE, HOLDINGS, ERROR, errorMsg2, null, null, null, null)) + .onFailure(context::fail); + + future.onComplete(ar -> context.verify(v -> { + RestAssured.given() + .spec(spec) + .when() + .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) + .then() + .statusCode(HttpStatus.SC_OK) + .log().all() + .body("entries", hasSize(1)) + .body("totalRecords", is(1)) + .body("entries[0].jobExecutionId", is(createdJobExecution.getId())) + .body("entries[0].sourceRecordId", is(sourceRecordId)) + .body("entries[0].sourceRecordTitle", is(recordTitle)) + .body("entries[0].sourceRecordActionStatus", is(ActionStatus.CREATED.value())) + .body("entries[0].relatedHoldingsInfo[0].actionStatus", is(ActionStatus.CREATED.value())) + .body("entries[0].relatedHoldingsInfo[0].id", is(holdingsId[0])) + .body("entries[0].relatedHoldingsInfo[1].actionStatus", is(ActionStatus.DISCARDED.value())) + .body("entries[0].relatedHoldingsInfo[1].error", oneOf(errorMsg1, errorMsg2)) + .body("entries[0].relatedHoldingsInfo[2].actionStatus", 
is(ActionStatus.DISCARDED.value())) + .body("entries[0].relatedHoldingsInfo[2].error", oneOf(errorMsg1, errorMsg2)); + async.complete(); + })); + } + + @Test + public void shouldReturnMarcBibUpdatedWhenMarcBibWasModified(TestContext context) { + Async async = context.async(); + JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); + String sourceRecordId = UUID.randomUUID().toString(); + String recordTitle = "test title"; + + Future future = Future.succeededFuture() + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, null, 0, MODIFY, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) + .onFailure(context::fail); + + future.onComplete(ar -> context.verify(v -> { + RestAssured.given() + .spec(spec) + .when() + .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) + .then() + .statusCode(HttpStatus.SC_OK) + .body("entries", hasSize(1)) + .body("totalRecords", is(1)) + .body("entries[0].jobExecutionId", is(createdJobExecution.getId())) + .body("entries[0].sourceRecordId", is(sourceRecordId)) + .body("entries[0].sourceRecordTitle", is(recordTitle)) + .body("entries[0].sourceRecordActionStatus", is(ActionStatus.CREATED.value())); + + async.complete(); + })); + } + + @Test + public void shouldReturnMarcBibCreatedWhenMarcBibWasCreatedInNonMatchSection(TestContext context) { + Async async = context.async(); + JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); + String sourceRecordId = UUID.randomUUID().toString(); + String recordTitle = "test title"; + + Future future = Future.succeededFuture() + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, NON_MATCH, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) + .onFailure(context::fail); + + future.onComplete(ar -> context.verify(v -> { + RestAssured.given() + .spec(spec) + .when() + .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) + .then() + .statusCode(HttpStatus.SC_OK) + .body("entries", hasSize(1)) + .body("totalRecords", is(1)) + .body("entries[0].jobExecutionId", is(createdJobExecution.getId())) + .body("entries[0].sourceRecordId", is(sourceRecordId)) + .body("entries[0].sourceRecordTitle", is(recordTitle)) + .body("entries[0].sourceRecordActionStatus", is(ActionStatus.CREATED.value())); + + async.complete(); + })); + } + + @Test + public void shouldReturnInstanceDiscardedWhenInstanceWasNotMatched(TestContext context) { + Async async = context.async(); + JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); + String sourceRecordId = UUID.randomUUID().toString(); + String recordTitle = "test title"; + + + Future future = Future.succeededFuture() + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, "in00000000001", null, 0, NON_MATCH, INSTANCE, COMPLETED, null, null)) + .onSuccess(v -> async.complete()) + .onFailure(context::fail); + + 
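+ // once both records are written, the log entry should report the MARC bib as CREATED and the non-matched Instance as DISCARDED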
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(1))
+        .body("entries[0].jobExecutionId", is(createdJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .body("entries[0].sourceRecordTitle", is(recordTitle))
+        .body("entries[0].sourceRecordActionStatus", is(ActionStatus.CREATED.value()))
+        .body("entries[0].instanceActionStatus", is(ActionStatus.DISCARDED.value()))
+        .body("entries[0].error", emptyOrNullString());
+
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnInstanceDiscardedWhenInstanceCreationFailed(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+    String recordTitle = "test title";
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, null, 0, CREATE, INSTANCE, ERROR, "error msg", null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(1))
+        .body("entries[0].jobExecutionId", is(createdJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .body("entries[0].sourceRecordTitle", is(recordTitle))
+        .body("entries[0].relatedInstanceInfo.actionStatus", is(ActionStatus.DISCARDED.value()))
+        .body("entries[0].relatedInstanceInfo.error", not(emptyOrNullString()));
+
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnInstanceCreatedWhenMarcModify(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+    String recordTitle = "test title";
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, "marcEntityID", null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, "marcEntityID", null, recordTitle, 0, MODIFY, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, null, 0, UPDATE, INSTANCE, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, "instanceEntityID", "in00000000001", null, 0, CREATE, INSTANCE, COMPLETED, null, null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(2))
+        .body("totalRecords", is(1))
+        .body("entries[0].jobExecutionId", is(createdJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .body("entries[0].sourceRecordTitle", is(recordTitle))
+        .body("entries[0].relatedInstanceInfo.actionStatus", is(ActionStatus.CREATED.value()));
+
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnPoLineCreatedWhenMarcCreate(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+    String recordTitle = "test title";
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, "marcEntityID", null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, "poLineEntityID", null, null, 0, CREATE, PO_LINE, COMPLETED, null, null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(1))
+        .body("entries[0].jobExecutionId", is(createdJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .body("entries[0].sourceRecordTitle", is(recordTitle))
+        .body("entries[0].relatedPoLineInfo.actionStatus", is(ActionStatus.CREATED.value()));
+
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnAuthorityDiscardedWhenErrorOnMatch(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+    String recordTitle = "test title";
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, "authorityEntityID", null, recordTitle, 0, MATCH, MARC_AUTHORITY, ERROR, "errorMsg", null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(1))
+        .body("entries[0].jobExecutionId", is(createdJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .body("entries[0].sourceRecordType", is(MARC_AUTHORITY.value()))
+        .body("entries[0].sourceRecordTitle", is(recordTitle))
+        .body("entries[0].relatedAuthorityInfo.error", is(notNullValue()))
+        .body("entries[0].sourceRecordActionStatus", is(ActionStatus.DISCARDED.value()));
+
+      async.complete();
+    }));
+  }
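+
+  // The next test constructs a JournalRecord through its builder rather than the
+  // createJournalRecord helper: it needs instanceId populated on a HOLDINGS record
+  // that belongs to a different job execution than the instance it points to, so the
+  // assertions can check that the holdings entry still resolves relatedInstanceInfo.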
+  @Test
+  public void shouldReturnInstanceIdWhenHoldingsCreatedRecordProcessingLogDTOCollection(TestContext context) {
+    Async async = context.async();
+    JobExecution instanceCreationJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    JobExecution holdingsCreationJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+
+    String instanceCreationSourceRecordId = UUID.randomUUID().toString();
+    String holdingsCreationSourceRecordId = UUID.randomUUID().toString();
+
+    String recordTitle = "test title";
+
+    JournalRecord holdingsCreatedJournalRecord = new JournalRecord()
+      .withJobExecutionId(holdingsCreationJobExecution.getId())
+      .withSourceId(holdingsCreationSourceRecordId)
+      .withTitle(null)
+      .withSourceRecordOrder(0)
+      .withEntityType(HOLDINGS)
+      .withActionType(CREATE)
+      .withActionStatus(COMPLETED)
+      .withError(null)
+      .withActionDate(new Date())
+      .withEntityId("holdingsEntityID")
+      .withEntityHrId("ho00000000001")
+      .withInstanceId("instanceEntityID");
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(instanceCreationJobExecution.getId(), instanceCreationSourceRecordId, "instanceMarcEntityID", null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(instanceCreationJobExecution.getId(), instanceCreationSourceRecordId, "instanceEntityID", "in00000000001", null, 0, CREATE, INSTANCE, COMPLETED, null, null))
+      .compose(v -> journalRecordDao.save(holdingsCreatedJournalRecord, TENANT_ID).map(holdingsCreatedJournalRecord))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + holdingsCreationJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(1))
+        .body("entries[0].jobExecutionId", is(holdingsCreationJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(holdingsCreationSourceRecordId))
+        .body("entries[0].sourceRecordOrder", is("0"))
+        .body("entries[0].error", emptyOrNullString())
+        .body("entries[0].relatedInstanceInfo.idList[0]", is("instanceEntityID"))
+        .body("entries[0].relatedInstanceInfo.error", emptyOrNullString())
+        .body("entries[0].relatedHoldingsInfo[0].id", is("holdingsEntityID"))
+        .body("entries[0].relatedHoldingsInfo[0].hrid", is("ho00000000001"))
+        .body("entries[0].relatedHoldingsInfo[0].error", emptyOrNullString());
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnPoLineWithOrderIdWhenMarcCreateRecordProcessingLogDTOCollection(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+    String recordTitle = "test title";
+    String orderId = UUID.randomUUID().toString();
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, "marcEntityID", null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, "poLineEntityID", null, null, 0, CREATE, PO_LINE, COMPLETED, "Test error", orderId))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(1))
+        .body("entries[0].jobExecutionId", is(createdJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .body("entries[0].sourceRecordTitle", is(recordTitle))
+        .body("entries[0].sourceRecordActionStatus", is(ActionStatus.CREATED.value()))
+        .body("entries[0].relatedPoLineInfo", notNullValue())
+        .body("entries[0].relatedPoLineInfo.orderId", is(orderId))
+        .body("entries[0].relatedPoLineInfo.error", is("Test error"));
+
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnOneInstanceIdWhenMarcBibUpdatedAndInstanceUpdatedRecordProcessingLogDTOCollection(TestContext context) {
+    Async async = context.async();
+    JobExecution marcBibAndInstanceUpdateJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+
+    String marcBibAndInstanceUpdateSourceRecordId = UUID.randomUUID().toString();
+
+    String recordTitle = "test title";
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(marcBibAndInstanceUpdateJobExecution.getId(), marcBibAndInstanceUpdateSourceRecordId, "instanceEntityID", "in00000000001", null, 0, UPDATE, INSTANCE, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(marcBibAndInstanceUpdateJobExecution.getId(), marcBibAndInstanceUpdateSourceRecordId, "instanceEntityID", "in00000000001", null, 0, UPDATE, INSTANCE, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(marcBibAndInstanceUpdateJobExecution.getId(), marcBibAndInstanceUpdateSourceRecordId, "marcBibEntityID", null, recordTitle, 0, MODIFY, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + marcBibAndInstanceUpdateJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(1))
+        .body("entries[0].jobExecutionId", is(marcBibAndInstanceUpdateJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(marcBibAndInstanceUpdateSourceRecordId))
+        .body("entries[0].error", emptyOrNullString())
+        .body("entries[0].relatedInstanceInfo.idList.size()", is(1))
+        .body("entries[0].relatedInstanceInfo.hridList.size()", is(1))
+        .body("entries[0].relatedInstanceInfo.error", emptyOrNullString());
+
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnHoldingsMultipleWhenMultipleHoldingsWereProcessed(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+    String recordTitle = "test title";
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, null, 0, CREATE, HOLDINGS, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, null, 0, UPDATE, HOLDINGS, COMPLETED, null, null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(1))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .body("entries[0].sourceRecordTitle", is(recordTitle))
+        .body("entries[0].relatedHoldingsInfo[0].actionStatus", is(ActionStatus.CREATED.value()));
+      async.complete();
+    }));
+  }
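+
+  // The next test saves a MARC_HOLDINGS record without a title; the assertions expect
+  // the endpoint to synthesize a display title from the holdings HRID
+  // ("Holdings ho00000000001") instead of leaving sourceRecordTitle empty.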
+  @Test
+  public void shouldReturnHoldingsTitleWithHoldingsHrid(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, null, 0, CREATE, MARC_HOLDINGS, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, "ho00000000001", null, 0, CREATE, HOLDINGS, COMPLETED, null, null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(1))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .body("entries[0].sourceRecordTitle", is("Holdings ho00000000001"))
+        .body("entries[0].relatedHoldingsInfo[0].hrid", is("ho00000000001"))
+        .body("entries[0].sourceRecordType", is(MARC_HOLDINGS.value()));
+
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnSortedEntriesWhenSortByParameterSpecified(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId1 = UUID.randomUUID().toString();
+    String sourceRecordId2 = UUID.randomUUID().toString();
+    String sourceRecordId3 = UUID.randomUUID().toString();
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId1, null, null, null, 1, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId1, null, "in00000000002", null, 1, CREATE, INSTANCE, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId2, null, null, null, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId2, null, "in00000000001", null, 0, CREATE, INSTANCE, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId3, null, null, null, 3, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId3, null, "in00000000003", null, 3, CREATE, INSTANCE, COMPLETED, null, null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      List<RecordProcessingLogDto> recordProcessingLogDtos = RestAssured.given()
+        .spec(spec)
+        .queryParam("sortBy", "source_record_order")
+        .queryParam("order", "desc")
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries", hasSize(3))
+        .body("totalRecords", is(3))
+        .extract().body().as(RecordProcessingLogDtoCollection.class).getEntries();
+
+      context.assertTrue(Integer.parseInt(recordProcessingLogDtos.get(0).getSourceRecordOrder()) > Integer.parseInt(recordProcessingLogDtos.get(1).getSourceRecordOrder()));
+      context.assertTrue(Integer.parseInt(recordProcessingLogDtos.get(1).getSourceRecordOrder()) > Integer.parseInt(recordProcessingLogDtos.get(2).getSourceRecordOrder()));
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnBadRequestOnGetWhenInvalidSortingFieldIsSpecified() {
+    RestAssured.given()
+      .spec(spec)
+      .queryParam("sortBy", "invalid_field")
+      .queryParam("order", "asc")
+      .when()
+      .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + UUID.randomUUID())
+      .then()
+      .statusCode(HttpStatus.SC_BAD_REQUEST);
+  }
+
+  @Test
+  public void shouldReturnLimitedCollectionOnGetWithLimitAndOffset(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId1 = UUID.randomUUID().toString();
+    String sourceRecordId2 = UUID.randomUUID().toString();
+    String sourceRecordId3 = UUID.randomUUID().toString();
+    String recordTitle1 = "title1";
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId1, null, null, recordTitle1, 1, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId1, null, "in00000000001", null, 1, CREATE, INSTANCE, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId2, null, null, "title0", 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId2, null, null, null, 0, CREATE, INSTANCE, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId3, null, null, "title3", 3, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId3, null, null, null, 3, CREATE, INSTANCE, COMPLETED, null, null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .queryParam("sortBy", "source_record_order")
+        .queryParam("order", "desc")
+        .queryParam("limit", "1")
+        .queryParam("offset", "1")
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(3))
+        .body("entries[0].jobExecutionId", is(createdJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(sourceRecordId1))
+        .body("entries[0].sourceRecordTitle", is(recordTitle1))
+        .body("entries[0].sourceRecordOrder", is("1"))
+        .body("entries[0].relatedHoldingsInfo.hrid", is(empty()))
+        .body("entries[0].sourceRecordType", is(MARC_BIBLIOGRAPHIC.value()));
+
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnAuthorityCreated(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+    String entityId = UUID.randomUUID().toString();
+
+    String recordTitle = "test title";
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, entityId, null, recordTitle, 0, CREATE, MARC_AUTHORITY, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, null, 0, CREATE, AUTHORITY, COMPLETED, null, null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(1))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .body("entries[0].sourceRecordTitle", is(recordTitle))
+        .body("entries[0].sourceRecordActionStatus", is(ActionStatus.CREATED.value()))
+        .body("entries[0].relatedAuthorityInfo.actionStatus", is(ActionStatus.CREATED.value()));
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnMarcBibUpdatedByJobAndRecordIdsRecordProcessingLogDTOCollection(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+    String recordTitle = "test title";
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, null, 0, UPDATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(1))
+        .body("entries[0].jobExecutionId", is(createdJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .body("entries[0].sourceRecordTitle", is(recordTitle))
+        .body("entries[0].sourceRecordOrder", is("0"))
+        .body("entries[0].sourceRecordActionStatus", is(ActionStatus.CREATED.value()));
+
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnEmptyMarcBibErrorAndInstanceDiscardedWhenInstanceCreationFailedRecordProcessingLogDTOCollection(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+    String entityId = UUID.randomUUID().toString();
+    String entityHrid = "001";
+    String recordTitle = "test title";
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, entityId, entityHrid, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, entityId, entityHrid, null, 0, CREATE, INSTANCE, ERROR, "error msg", null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(1))
+        .body("entries[0].jobExecutionId", is(createdJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .body("entries[0].sourceRecordTitle", is(recordTitle))
+        .body("entries[0].sourceRecordOrder", is("0"))
+        .body("entries[0].error", emptyOrNullString())
+        .body("entries[0].relatedInstanceInfo.idList[0]", is(entityId))
+        .body("entries[0].relatedInstanceInfo.hridList[0]", is(entityHrid))
+        .body("entries[0].relatedInstanceInfo.error", is("error msg"));
+
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnNotEmptyMarcBibErrorWhenMarcBibFailedRecordProcessingLogDTOCollection(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+    String recordTitle = "test title";
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, ERROR, "MarcBib error msg", null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(1))
+        .body("entries[0].jobExecutionId", is(createdJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .body("entries[0].sourceRecordTitle", is(recordTitle))
+        .body("entries[0].sourceRecordOrder", is("0"))
+        .body("entries[0].error", is("MarcBib error msg"));
+
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnMarcBibAndAllEntitiesWithoutErrorsRecordProcessingLogDTOCollection(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+    String recordTitle = "test title";
+
+    String instanceId = UUID.randomUUID().toString();
+    String instanceHrid = "i001";
+
+    String holdingsId = UUID.randomUUID().toString();
+    String holdingsHrid = "h001";
+
+    String itemId = UUID.randomUUID().toString();
+    String itemHrid = "it001";
+
+    String poLineId = UUID.randomUUID().toString();
+    String poLineHrid = "po001";
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, instanceId, instanceHrid, null, 0, CREATE, INSTANCE, COMPLETED, null, null))
+      .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, holdingsId, holdingsHrid, null, 0, CREATE, HOLDINGS, COMPLETED, null, null, instanceId, null, null))
+      .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, itemId, itemHrid, null, 0, CREATE, ITEM, COMPLETED, null, null, instanceId, holdingsId, null))
+      .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, poLineId, poLineHrid, null, 0, CREATE, PO_LINE, COMPLETED, null, null, instanceId, null, null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(1))
+        .body("entries[0].jobExecutionId", is(createdJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .body("entries[0].sourceRecordTitle", is(recordTitle))
+        .body("entries[0].sourceRecordOrder", is("0"))
+        .body("entries[0].error", emptyOrNullString())
+        .body("entries[0].relatedInstanceInfo.idList[0]", is(instanceId))
+        .body("entries[0].relatedInstanceInfo.hridList[0]", is(instanceHrid))
+        .body("entries[0].relatedInstanceInfo.error", emptyOrNullString())
+        .body("entries[0].relatedHoldingsInfo[0].id", is(holdingsId))
+        .body("entries[0].relatedHoldingsInfo[0].hrid", is(holdingsHrid))
+        .body("entries[0].relatedHoldingsInfo[0].error", emptyOrNullString())
+        .body("entries[0].relatedItemInfo[0].id", is(itemId))
+        .body("entries[0].relatedItemInfo[0].hrid", is(itemHrid))
+        .body("entries[0].relatedItemInfo[0].error", emptyOrNullString())
+        .body("entries[0].relatedPoLineInfo.idList[0]", is(poLineId))
+        .body("entries[0].relatedPoLineInfo.hridList[0]", is(poLineHrid))
+        .body("entries[0].relatedPoLineInfo.error", emptyOrNullString())
+        .body("entries[0].relatedInvoiceInfo.idList", empty())
+        .body("entries[0].relatedInvoiceInfo.hridList", empty())
+        .body("entries[0].relatedInvoiceInfo.error", emptyOrNullString());
+
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnDiscardedForHoldingsIfNoHoldingsCreatedRecordProcessingLogDTOCollection(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+    String recordTitle = "test title";
+
+    String instanceId = UUID.randomUUID().toString();
+    String instanceHrid = "i001";
+
+    String testError = "testError";
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, instanceId, instanceHrid, null, 0, CREATE, INSTANCE, COMPLETED, null, null))
+      .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, HOLDINGS, ERROR, testError, null, null, null, null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(1))
+        .body("entries[0].jobExecutionId", is(createdJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .body("entries[0].sourceRecordTitle", is(recordTitle))
+        .body("entries[0].sourceRecordOrder", is("0"))
+        .body("entries[0].error", emptyOrNullString())
+        .body("entries[0].relatedInstanceInfo.idList[0]", is(instanceId))
+        .body("entries[0].relatedInstanceInfo.hridList[0]", is(instanceHrid))
+        .body("entries[0].relatedInstanceInfo.error", emptyOrNullString())
+        .body("entries[0].relatedHoldingsInfo[0].id", emptyOrNullString())
+        .body("entries[0].relatedHoldingsInfo[0].hrid", emptyOrNullString())
+        .body("entries[0].relatedHoldingsInfo[0].error", is(testError));
+
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnDiscardedForItemsIfNoItemsCreatedRecordProcessingLogDTOCollection(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+    String recordTitle = "test title";
+
+    String instanceId = UUID.randomUUID().toString();
+    String instanceHrid = "i001";
+
+    String holdingsId = UUID.randomUUID().toString();
+    String holdingsHrid = "h001";
+
+    String testError = "testError";
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, instanceId, instanceHrid, null, 0, CREATE, INSTANCE, COMPLETED, null, null))
+      .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, holdingsId, holdingsHrid, recordTitle, 0, CREATE, HOLDINGS, COMPLETED, null, null, instanceId, null, null))
+      .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, ITEM, ERROR, testError, null, null, null, null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(1))
+        .body("entries[0].jobExecutionId", is(createdJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .body("entries[0].sourceRecordTitle", is(recordTitle))
+        .body("entries[0].sourceRecordOrder", is("0"))
+        .body("entries[0].error", emptyOrNullString())
+        .body("entries[0].relatedInstanceInfo.idList[0]", is(instanceId))
+        .body("entries[0].relatedInstanceInfo.hridList[0]", is(instanceHrid))
+        .body("entries[0].relatedInstanceInfo.error", emptyOrNullString())
+        .body("entries[0].relatedHoldingsInfo[0].id", is(holdingsId))
+        .body("entries[0].relatedHoldingsInfo[0].hrid", is(holdingsHrid))
+        .body("entries[0].relatedHoldingsInfo[0].error", emptyOrNullString())
+        .body("entries[0].relatedItemInfo[0].id", emptyOrNullString())
+        .body("entries[0].relatedItemInfo[0].hrid", emptyOrNullString())
+        .body("entries[0].relatedItemInfo[0].error", is(testError));
+      async.complete();
+    }));
+  }
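+
+  // The invoice tests below rely on EDIFACT imports producing one processing-log entry
+  // per invoice line (INVOICE journal records with order > 0), with the shared invoice
+  // exposed through relatedInvoiceInfo and the line itself through relatedInvoiceLineInfo.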
.body("entries[0].relatedHoldingsInfo.size", empty()) + .body("entries[0].relatedItemInfo.size", empty()) + .body("entries[0].relatedPoLineInfo.idList.size", empty()) + .body("entries[0].relatedPoLineInfo.hridList.size", empty()) + .body("entries[0].relatedPoLineInfo.error", emptyOrNullString()) + .body("entries[0].relatedInvoiceInfo.idList[0]", is(invoiceId)) + .body("entries[0].relatedInvoiceInfo.hridList[0]", is(invoiceHrid)) + .body("entries[0].relatedInvoiceInfo.error", emptyOrNullString()) + .body("entries[0].relatedInvoiceLineInfo.id", is(invoiceLineId1)) + .body("entries[0].relatedInvoiceLineInfo.fullInvoiceLineNumber", is(invoiceVendorNumber + "-1")) + .body("entries[0].relatedInvoiceLineInfo.error", emptyOrNullString()) + .body("entries[0].invoiceLineJournalRecordId", notNullValue()) + + .body("entries[1].jobExecutionId", is(createdJobExecution.getId())) + .body("entries[1].sourceRecordId", is(sourceRecordId)) + .body("entries[1].sourceRecordTitle", is(invoiceLineDescription + "2")) + .body("entries[1].sourceRecordOrder", is("0704159-2")) + .body("entries[1].error", emptyOrNullString()) + .body("entries[1].relatedInstanceInfo.idList.size", empty()) + .body("entries[1].relatedInstanceInfo.hridList.size", empty()) + .body("entries[1].relatedInstanceInfo.error", nullValue()) + .body("entries[1].relatedHoldingsInfo.size", empty()) + .body("entries[1].relatedItemInfo.size", empty()) + .body("entries[1].relatedPoLineInfo.idList.size", empty()) + .body("entries[1].relatedPoLineInfo.hridList.size", empty()) + .body("entries[1].relatedPoLineInfo.error", emptyOrNullString()) + .body("entries[1].relatedInvoiceInfo.idList[0]", is(invoiceId)) + .body("entries[1].relatedInvoiceInfo.hridList[0]", is(invoiceHrid)) + .body("entries[1].relatedInvoiceInfo.error", emptyOrNullString()) + .body("entries[1].relatedInvoiceLineInfo.id", is(invoiceLineId2)) + .body("entries[1].relatedInvoiceLineInfo.fullInvoiceLineNumber", is(invoiceVendorNumber + "-2")) + .body("entries[1].relatedInvoiceLineInfo.error", emptyOrNullString()) + .body("entries[1].invoiceLineJournalRecordId", notNullValue()); + async.complete(); + })); + } + + @Test + public void shouldReturnInvoiceLineInfoWithErrorRecordProcessingLogDTOCollection(TestContext context) { + Async async = context.async(); + JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); + String sourceRecordId = UUID.randomUUID().toString(); + String invoiceId = UUID.randomUUID().toString(); + String invoiceHrid = "228D126"; + String invoiceVendorNumber = "0704159"; + String invoiceLineId1 = UUID.randomUUID().toString(); + String invoiceLineDescription = "Some description"; + String errorMsg = "error-msg"; + + Future future = Future.succeededFuture() + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, null, 0, CREATE, EDIFACT, COMPLETED, null, null)) + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, invoiceId, invoiceHrid, "INVOICE", 0, CREATE, INVOICE, COMPLETED, null, null)) + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, invoiceLineId1, invoiceVendorNumber + "-1", invoiceLineDescription + "1", 1, CREATE, INVOICE, COMPLETED, null, null)) + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceVendorNumber + "-2", invoiceLineDescription + "2", 2, CREATE, INVOICE, ERROR, errorMsg, null)) + .onFailure(context::fail); + + future.onComplete(ar -> context.verify(v -> { + 
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(2))
+        .body("totalRecords", is(2))
+        .body("entries[0].jobExecutionId", is(createdJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .body("entries[0].sourceRecordTitle", is(invoiceLineDescription + "1"))
+        .body("entries[0].sourceRecordOrder", is(invoiceVendorNumber + "-1"))
+        .body("entries[0].error", emptyOrNullString())
+        .body("entries[0].relatedInvoiceInfo.idList[0]", is(invoiceId))
+        .body("entries[0].relatedInvoiceInfo.hridList[0]", is(invoiceHrid))
+        .body("entries[0].relatedInvoiceInfo.error", emptyOrNullString())
+        .body("entries[0].relatedInvoiceLineInfo.id", is(invoiceLineId1))
+        .body("entries[0].relatedInvoiceLineInfo.fullInvoiceLineNumber", is(invoiceVendorNumber + "-1"))
+        .body("entries[0].relatedInvoiceLineInfo.error", emptyOrNullString())
+        .body("entries[0].invoiceLineJournalRecordId", notNullValue())
+
+        .body("entries[1].jobExecutionId", is(createdJobExecution.getId()))
+        .body("entries[1].sourceRecordId", is(sourceRecordId))
+        .body("entries[1].sourceRecordTitle", is(invoiceLineDescription + "2"))
+        .body("entries[1].sourceRecordOrder", is(invoiceVendorNumber + "-2"))
+        .body("entries[1].error", emptyOrNullString())
+        .body("entries[1].relatedInvoiceInfo.idList[0]", is(invoiceId))
+        .body("entries[1].relatedInvoiceInfo.hridList[0]", is(invoiceHrid))
+        .body("entries[1].relatedInvoiceInfo.error", emptyOrNullString())
+        .body("entries[1].relatedInvoiceLineInfo.id", emptyOrNullString())
+        .body("entries[1].relatedInvoiceLineInfo.fullInvoiceLineNumber", is(invoiceVendorNumber + "-2"))
+        .body("entries[1].relatedInvoiceLineInfo.error", is(errorMsg))
+        .body("entries[1].invoiceLineJournalRecordId", notNullValue());
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnNotEmptyListWithInvoicesLines(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+
+    String invoiceLineDescription = "Some description";
+    String invoiceLineId = "0704159";
+
+    CompositeFuture future = GenericCompositeFuture.all(List.of(
+      createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, "228D126", "INVOICE", 0, CREATE, INVOICE, COMPLETED, null, null).map(JournalRecord::getId),
+      createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-1", invoiceLineDescription + "1", 1, CREATE, INVOICE, COMPLETED, null, null).map(JournalRecord::getId),
+      createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-2", invoiceLineDescription + "2", 2, CREATE, INVOICE, COMPLETED, null, null).map(JournalRecord::getId),
+      createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-3", invoiceLineDescription + "3", 3, CREATE, INVOICE, COMPLETED, null, null).map(JournalRecord::getId)))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(3))
+        .body("totalRecords", is(3))
+        .body("entries*.jobExecutionId", everyItem(is(createdJobExecution.getId())))
+        .body("entries*.sourceRecordId", everyItem(is(sourceRecordId)))
+        .body("entries[0].sourceRecordTitle", is(invoiceLineDescription + "1"))
+        .body("entries[1].sourceRecordTitle", is(invoiceLineDescription + "2"))
+        .body("entries[2].sourceRecordTitle", is(invoiceLineDescription + "3"))
+        .body("entries[0].sourceRecordOrder", is(invoiceLineId + "-1"))
+        .body("entries[1].sourceRecordOrder", is(invoiceLineId + "-2"))
+        .body("entries[2].sourceRecordOrder", is(invoiceLineId + "-3"))
+        // skip result at 0 index, since it is invoice related journal record id
+        .body("entries[0].invoiceLineJournalRecordId", is(future.resultAt(1).toString()))
+        .body("entries[1].invoiceLineJournalRecordId", is(future.resultAt(2).toString()))
+        .body("entries[2].invoiceLineJournalRecordId", is(future.resultAt(3).toString()));
+
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnNotEmptyListWithInvoicesLinesThatContainsError(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+
+    String invoiceLineDescription = "Some description";
+    String invoiceLineId = "0704159";
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, "228D126", "INVOICE", 0, CREATE, INVOICE, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-1", invoiceLineDescription + "1", 1, CREATE, INVOICE, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-2", invoiceLineDescription + "2", 2, CREATE, INVOICE, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-3", invoiceLineDescription + "3", 3, CREATE, INVOICE, ERROR, "Exception", null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      List<RecordProcessingLogDto> recordProcessingLogDtos = RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(3))
+        .body("totalRecords", is(3))
+        .body("entries[0].jobExecutionId", is(createdJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .body("entries[0].sourceRecordTitle", is(invoiceLineDescription + "1"))
+        .body("entries[0].sourceRecordOrder", is(invoiceLineId + "-1"))
+        .body("entries[2].sourceRecordTitle", is(invoiceLineDescription + "3"))
+        .body("entries[2].sourceRecordOrder", is(invoiceLineId + "-3"))
+        .extract().body().as(RecordProcessingLogDtoCollection.class).getEntries();
+
+      Assert.assertEquals("Exception", recordProcessingLogDtos.get(2).getRelatedInvoiceLineInfo().getError());
+      Assert.assertEquals(ActionStatus.DISCARDED, recordProcessingLogDtos.get(2).getRelatedInvoiceLineInfo().getActionStatus());
+      Assert.assertEquals(ActionStatus.DISCARDED, recordProcessingLogDtos.get(2).getRelatedInvoiceInfo().getActionStatus());
+
+      async.complete();
+    }));
+  }
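+
+  // The errorsOnly flag is expected to narrow the collection to entries carrying an
+  // error; combined with entityType=MARC it should yield nothing in the next test,
+  // because only the INSTANCE journal records are seeded with errors there.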
+  @Test
+  public void shouldNotReturnMarcBibRecordsWhenInstanceDiscarderRetrievingWithErrorsOnlyParam(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+
+    String sourceRecordId1 = UUID.randomUUID().toString();
+    String sourceRecordId2 = UUID.randomUUID().toString();
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId1, null, null, null, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId1, null, "in00000000001", null, 0, CREATE, INSTANCE, ERROR, "Error description 1", null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId2, null, null, null, 3, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId2, null, "in00000000003", null, 3, CREATE, INSTANCE, ERROR, "Error description 2", null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .param("errorsOnly", true)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries", hasSize(2))
+        .body("totalRecords", is(2))
+        .body("entries[0].relatedInstanceInfo.error", is("Error description 1"))
+        .body("entries[1].relatedInstanceInfo.error", is("Error description 2"))
+        .body("entries[0].sourceRecordOrder", is("0"))
+        .body("entries[1].sourceRecordOrder", is("3"));
+
+      RestAssured.given()
+        .spec(spec)
+        .param("errorsOnly", true)
+        .param("entityType", "MARC")
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries", is(empty()))
+        .body("totalRecords", is(0));
+
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnOnlyInvoiceLinesWithErrorWhenRetrieveWithErrorsOnlyParam(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+    String invoiceLineDescription = "Some description";
+    String invoiceLineId = "246816";
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, "10001", "INVOICE", 0, CREATE, INVOICE, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-1", invoiceLineDescription + "1", 1, CREATE, INVOICE, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-2", invoiceLineDescription + "2", 2, CREATE, INVOICE, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-3", invoiceLineDescription + "3", 3, CREATE, INVOICE, ERROR, "Exception", null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      List<RecordProcessingLogDto> jobLogEntries = RestAssured.given()
+        .spec(spec)
+        .when()
+        .param("errorsOnly", true)
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(1))
+        .body("entries[0].jobExecutionId", is(createdJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .extract().body().as(RecordProcessingLogDtoCollection.class).getEntries();
+
+      Assert.assertEquals("Exception", jobLogEntries.get(0).getRelatedInvoiceLineInfo().getError());
+      Assert.assertEquals(ActionStatus.DISCARDED, jobLogEntries.get(0).getRelatedInvoiceInfo().getActionStatus());
+
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnOnlyOneSummaryEntityWhenRetrieveUsingEntityTypeParamWithValueHoldings(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+
+    String sourceRecordId1 = UUID.randomUUID().toString();
+    String sourceRecordId2 = UUID.randomUUID().toString();
+    String sourceRecordId3 = UUID.randomUUID().toString();
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId1, null, null, null, 1, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId1, null, "in00000000002", null, 1, CREATE, INSTANCE, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId1, null, "ho00000000002", null, 1, CREATE, HOLDINGS, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId2, null, null, null, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId2, null, "in00000000001", null, 0, CREATE, INSTANCE, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId3, null, null, null, 3, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, "", null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId3, null, "in00000000003", null, 3, CREATE, INSTANCE, COMPLETED, null, null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .param("entityType", INSTANCE.value())
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries", hasSize(3))
+        .body("totalRecords", is(3));
+
+      RestAssured.given()
+        .spec(spec)
+        .param("entityType", HOLDINGS.value())
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries", hasSize(1))
+        .body("totalRecords", is(1));
+
+      async.complete();
+    }));
+  }
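+
+  // entityType filtering applies to the summary entries: a job seeded purely with
+  // INVOICE journal records is expected to return all entries for entityType=INVOICE
+  // and an empty collection for entityType=MARC.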
+  @Test
+  public void shouldNotReturnWhenRetrieveFromJobWhichInitializedByInvoiceUsingEntityTypeParamWithValueMARC(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+    String invoiceLineDescription = "Some description";
+    String invoiceLineId = "246816";
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, "10001", "INVOICE", 0, CREATE, INVOICE, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-1", invoiceLineDescription + "1", 1, CREATE, INVOICE, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-2", invoiceLineDescription + "2", 2, CREATE, INVOICE, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, invoiceLineId + "-3", invoiceLineDescription + "3", 3, CREATE, INVOICE, COMPLETED, null, null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .param("entityType", "INVOICE")
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(3))
+        .body("totalRecords", is(3));
+
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .param("entityType", "MARC")
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .body("entries.size()", is(0))
+        .body("totalRecords", is(0));
+
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnMarcBibAndAllEntitiesWithMultipleItemsAndHoldings(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+    String recordTitle = "test title";
+
+    String instanceId = UUID.randomUUID().toString();
+    String instanceHrid = "i001";
+
+    String[] holdingsId = generateRandomUUIDs(3);
+    String[] holdingsHrid = {"h001", "h002", "h003"};
+
+    String[] itemId = generateRandomUUIDs(4);
+    String[] itemHrid = {"it001", "it002", "it003", "it004"};
+
+    String[] permanentLocation = {UUID.randomUUID().toString(), UUID.randomUUID().toString(), UUID.randomUUID().toString()};
+
+    String errorMsg = "test error";
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, instanceId, instanceHrid, null, 0, CREATE, INSTANCE, COMPLETED, null, null))
+
+      .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, holdingsId[0], holdingsHrid[0], null, 0, CREATE, HOLDINGS, COMPLETED, null, null, instanceId, null, permanentLocation[0]))
+      .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, holdingsId[1], holdingsHrid[1], null, 0, CREATE, HOLDINGS, COMPLETED, null, null, instanceId, null, permanentLocation[1]))
+      .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, itemId[0], itemHrid[0], null, 0, CREATE, ITEM, COMPLETED, null, null, instanceId, holdingsId[0], null))
+      .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, itemId[1], itemHrid[1], null, 0, CREATE, ITEM, COMPLETED, null, null, instanceId, holdingsId[1], null))
+      .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, null, null, null, 0, CREATE, ITEM, ERROR, errorMsg, null, null, null, null))
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .log().all()
+
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(1))
+        .body("entries[0].jobExecutionId", is(createdJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .body("entries[0].sourceRecordTitle", is(recordTitle));
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnMarcBibAndAllEntitiesWithItemsHoldingsWithoutDiscardedRecordProcessingLogDTOCollection(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
+    String sourceRecordId = UUID.randomUUID().toString();
+    String recordTitle = "test title";
+
+    String instanceId = UUID.randomUUID().toString();
+    String instanceHrid = "i001";
+
+    String holdingsId = UUID.randomUUID().toString();
+    String holdingsHrid = "h001";
+
+    String itemId = UUID.randomUUID().toString();
+    String itemHrid = "it001";
+
+    String permanentLocation = UUID.randomUUID().toString();
+
+    Future<JournalRecord> future = Future.succeededFuture()
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, instanceId, instanceHrid, recordTitle, 0, CREATE, INSTANCE, COMPLETED, null, null))
+
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, NON_MATCH, HOLDINGS, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, NON_MATCH, MARC_BIBLIOGRAPHIC, COMPLETED, null, null))
+      .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, NON_MATCH, ITEM, COMPLETED, null, null))
+
+      .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, holdingsId, holdingsHrid, recordTitle, 0, CREATE, HOLDINGS, COMPLETED, null, null, instanceId, null, permanentLocation))
+      .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, itemId, itemHrid, recordTitle, 0, CREATE, ITEM, COMPLETED, null, null, instanceId, holdingsId, null))
+
+      .onFailure(context::fail);
+
+    future.onComplete(ar -> context.verify(v -> {
+      RestAssured.given()
+        .spec(spec)
+        .when()
+        .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId())
+        .then()
+        .statusCode(HttpStatus.SC_OK)
+        .log().all()
+        .body("entries.size()", is(1))
+        .body("totalRecords", is(1))
+        .body("entries[0].jobExecutionId", is(createdJobExecution.getId()))
+        .body("entries[0].sourceRecordId", is(sourceRecordId))
+        .body("entries[0].sourceRecordTitle", is(recordTitle))
+        .body("entries[0].sourceRecordOrder", is("0"))
+        .body("entries[0].error", emptyOrNullString())
+        .body("entries[0].relatedInstanceInfo.idList[0]", is(instanceId))
+        .body("entries[0].relatedInstanceInfo.hridList[0]", is(instanceHrid))
+        .body("entries[0].relatedInstanceInfo.error", emptyOrNullString())
+        .body("entries[0].relatedHoldingsInfo.size()", is(1))
+        .body("entries[0].relatedHoldingsInfo[0].id", is(holdingsId))
+        .body("entries[0].relatedHoldingsInfo[0].hrid", is(holdingsHrid))
+        .body("entries[0].relatedHoldingsInfo[0].permanentLocationId", is(permanentLocation))
+        .body("entries[0].relatedHoldingsInfo[0].error", emptyOrNullString())
+        .body("entries[0].relatedItemInfo.size()", is(1))
+        .body("entries[0].relatedItemInfo[0].id", is(itemId))
+        .body("entries[0].relatedItemInfo[0].hrid", is(itemHrid))
+        .body("entries[0].relatedItemInfo[0].holdingsId", is(holdingsId))
+        .body("entries[0].relatedItemInfo[0].error", emptyOrNullString())
+        .body("entries[0].relatedInvoiceInfo.idList", empty())
+        .body("entries[0].relatedInvoiceInfo.hridList", empty())
+        .body("entries[0].relatedInvoiceInfo.error", emptyOrNullString());
+
+      async.complete();
+    }));
+  }
+
+  @Test
+  public void shouldReturnMarcBibAndAllEntitiesWithDiscardedItemsHoldingsRecordProcessingLogDTOCollection(TestContext context) {
+    Async async = context.async();
+    JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0);
constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); + String sourceRecordId = UUID.randomUUID().toString(); + String recordTitle = "test title"; + + String instanceId = UUID.randomUUID().toString(); + String instanceHrid = "i001"; + + Future future = Future.succeededFuture() + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, instanceId, instanceHrid, recordTitle, 0, CREATE, INSTANCE, COMPLETED, null, null)) + + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, NON_MATCH, HOLDINGS, COMPLETED, null, null)) + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, NON_MATCH, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, NON_MATCH, ITEM, COMPLETED, null, null)) + + .onFailure(context::fail); + + future.onComplete(ar -> context.verify(v -> { + RestAssured.given() + .spec(spec) + .when() + .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) + .then() + .statusCode(HttpStatus.SC_OK) + .log().all() + .body("entries.size()", is(1)) + .body("totalRecords", is(1)) + .body("entries[0].jobExecutionId", is(createdJobExecution.getId())) + .body("entries[0].sourceRecordId", is(sourceRecordId)) + .body("entries[0].sourceRecordTitle", is(recordTitle)) + .body("entries[0].sourceRecordOrder", is("0")) + .body("entries[0].error", emptyOrNullString()) + .body("entries[0].relatedInstanceInfo.idList[0]", is(instanceId)) + .body("entries[0].relatedInstanceInfo.hridList[0]", is(instanceHrid)) + .body("entries[0].relatedInstanceInfo.error", emptyOrNullString()) + .body("entries[0].relatedHoldingsInfo.size()", is(1)) + .body("entries[0].relatedHoldingsInfo[0].id", emptyOrNullString()) + .body("entries[0].relatedHoldingsInfo[0].hrid", emptyOrNullString()) + .body("entries[0].relatedHoldingsInfo[0].permanentLocationId", emptyOrNullString()) + .body("entries[0].relatedHoldingsInfo[0].error", emptyOrNullString()) + .body("entries[0].relatedItemInfo.size()", is(1)) + .body("entries[0].relatedItemInfo[0].id", emptyOrNullString()) + .body("entries[0].relatedItemInfo[0].hrid", emptyOrNullString()) + .body("entries[0].relatedItemInfo[0].holdingsId", emptyOrNullString()) + .body("entries[0].relatedItemInfo[0].error", emptyOrNullString()) + .body("entries[0].relatedInvoiceInfo.idList", empty()) + .body("entries[0].relatedInvoiceInfo.hridList", empty()) + .body("entries[0].relatedInvoiceInfo.error", emptyOrNullString()); + async.complete(); + })); + } + + @Test + public void shouldReturnMarcBibAndAllEntitiesWithMultipleItemsUpdateRecordProcessingLogDTOCollection(TestContext context) { + Async async = context.async(); + JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); + String sourceRecordId = UUID.randomUUID().toString(); + String recordTitle = "test title"; + String instanceId = UUID.randomUUID().toString(); + String instanceHrid = "i001"; + String[] holdingsId = {"9f6b706f-eb88-4d36-92a7-50b03020e881", "e733fc11-c457-4ed7-9ef0-9ea669236a9a"}; + String[] itemId = generateRandomUUIDs(2); + String[] itemHrid = {"it001", "it002"}; + + Future future = Future.succeededFuture() + .compose(v -> 
createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, CREATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null)) + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, instanceId, instanceHrid, null, 0, CREATE, INSTANCE, COMPLETED, null, null)) + .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, itemId[0], itemHrid[0], null, 0, UPDATE, ITEM, COMPLETED, null, null, instanceId, holdingsId[0], null)) + .compose(v -> createJournalRecordAllFields(createdJobExecution.getId(), sourceRecordId, itemId[1], itemHrid[1], null, 0, UPDATE, ITEM, COMPLETED, null, null, instanceId, holdingsId[1], null)) + .onFailure(context::fail); + + future.onComplete(ar -> context.verify(v -> { + RestAssured.given() + .spec(spec) + .when() + .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) + .then() + .statusCode(HttpStatus.SC_OK) + .log().all() + .body("entries.size()", is(1)) + .body("totalRecords", is(1)) + .body("entries[0].jobExecutionId", is(createdJobExecution.getId())) + .body("entries[0].sourceRecordId", is(sourceRecordId)) + .body("entries[0].sourceRecordTitle", is(recordTitle)) + .body("entries[0].sourceRecordOrder", is("0")) + .body("entries[0].error", emptyOrNullString()) + .body("entries[0].relatedInstanceInfo.idList[0]", is(instanceId)) + .body("entries[0].relatedInstanceInfo.hridList[0]", is(instanceHrid)) + .body("entries[0].relatedInstanceInfo.error", emptyOrNullString()) + .body("entries[0].relatedHoldingsInfo.size()", is(2)) + .body("entries[0].relatedHoldingsInfo[0].id", in(holdingsId)) + .body("entries[0].relatedHoldingsInfo[1].id", in(holdingsId)) + .body("entries[0].relatedItemInfo[0].id", in(itemId)) + .body("entries[0].relatedItemInfo[0].hrid", in(itemHrid)) + .body("entries[0].relatedItemInfo[0].error", emptyOrNullString()) + .body("entries[0].relatedItemInfo[1].id", in(itemId)) + .body("entries[0].relatedItemInfo[1].hrid", in(itemHrid)) + .body("entries[0].relatedItemInfo[1].error", emptyOrNullString()) + .body("entries[0].relatedInvoiceInfo.idList", empty()) + .body("entries[0].relatedInvoiceInfo.hridList", empty()) + .body("entries[0].relatedInvoiceInfo.error", emptyOrNullString()); + async.complete(); + })); + } + + @Test + public void shouldReturnCentralTenantIdForMarcRecordAndInstanceIfItIsSavedInJournalRecordRecordProcessingLogDTOCollection(TestContext context) { + JobExecution createdJobExecution = constructAndPostInitJobExecutionRqDto(1).getJobExecutions().get(0); + String sourceRecordId = UUID.randomUUID().toString(); + String instanceId = UUID.randomUUID().toString(); + String recordTitle = "test title"; + String expectedCentralTenantId = "mobius"; + + Future future = Future.succeededFuture() + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, null, null, recordTitle, 0, UPDATE, MARC_BIBLIOGRAPHIC, COMPLETED, null, null, expectedCentralTenantId)) + .compose(v -> createJournalRecord(createdJobExecution.getId(), sourceRecordId, instanceId, "in00000000001", null, 0, UPDATE, INSTANCE, COMPLETED, null, null, expectedCentralTenantId)); + + future.onComplete(context.asyncAssertSuccess(v -> + RestAssured.given() + .spec(spec) + .when() + .get(GET_JOB_EXECUTION_JOURNAL_RECORDS_PATH + "/" + createdJobExecution.getId()) + .then() + .statusCode(HttpStatus.SC_OK) + + .body("entries.size()", is(1)) + .body("totalRecords", is(1)) + .body("entries[0].jobExecutionId", is(createdJobExecution.getId())) + .body("entries[0].sourceRecordId", 
is(sourceRecordId)) + .body("entries[0].sourceRecordTitle", is(recordTitle)) + .body("entries[0].sourceRecordOrder", is("0")) + .body("entries[0].sourceRecordTenantId", is(expectedCentralTenantId)) + .body("entries[0].relatedInstanceInfo.idList[0]", is(instanceId)) + .body("entries[0].relatedInstanceInfo.tenantId", is(expectedCentralTenantId)) + .body("entries[0].error", emptyOrNullString()))); + } + + private Future createJournalRecord(String jobExecutionId, String sourceId, String entityId, + String entityHrid, String title, int recordOrder, + JournalRecord.ActionType actionType, JournalRecord.EntityType entityType, + JournalRecord.ActionStatus actionStatus, String errorMessage, String orderId) { + return createJournalRecord(jobExecutionId, sourceId, entityId, entityHrid, title, recordOrder, actionType, + entityType, actionStatus, errorMessage, orderId, null); + } + + private Future createJournalRecord(String jobExecutionId, String sourceId, String entityId, + String entityHrid, String title, int recordOrder, + JournalRecord.ActionType actionType, JournalRecord.EntityType entityType, + JournalRecord.ActionStatus actionStatus, String errorMessage, + String orderId, String tenantId) { + JournalRecord journalRecord = new JournalRecord() + .withJobExecutionId(jobExecutionId) + .withSourceId(sourceId) + .withTitle(title) + .withSourceRecordOrder(recordOrder) + .withEntityType(entityType) + .withActionType(actionType) + .withActionStatus(actionStatus) + .withError(errorMessage) + .withActionDate(new Date()) + .withEntityId(entityId) + .withEntityHrId(entityHrid) + .withOrderId(orderId) + .withTenantId(tenantId); + return journalRecordDao.save(journalRecord, TENANT_ID).map(journalRecord); + } + + private Future createJournalRecordAllFields(String jobExecutionId, String sourceId, String entityId, String entityHrid, String title, int recordOrder, JournalRecord.ActionType actionType, + JournalRecord.EntityType entityType, JournalRecord.ActionStatus actionStatus, String errorMessage, String orderId, String instanceId, String holdingsId, String permanentLocation) { + JournalRecord journalRecord = new JournalRecord() + .withJobExecutionId(jobExecutionId) + .withSourceId(sourceId) + .withTitle(title) + .withSourceRecordOrder(recordOrder) + .withEntityType(entityType) + .withActionType(actionType) + .withActionStatus(actionStatus) + .withError(errorMessage) + .withActionDate(new Date()) + .withEntityId(entityId) + .withEntityHrId(entityHrid) + .withOrderId(orderId) + .withInstanceId(instanceId) + .withHoldingsId(holdingsId) + .withPermanentLocationId(permanentLocation); + return journalRecordDao.save(journalRecord, TENANT_ID).map(journalRecord); + } + + private String[] generateRandomUUIDs(int n) { + return IntStream.range(0, n).mapToObj(i -> UUID.randomUUID().toString()).toArray(String[]::new); + } +} diff --git a/mod-source-record-manager-server/src/test/java/org/folio/rest/impl/metadataProvider/MetadataProviderJobExecutionAPITest.java b/mod-source-record-manager-server/src/test/java/org/folio/rest/impl/metadataProvider/MetadataProviderJobExecutionAPITest.java index c4ec92b7f..f0542be2c 100644 --- a/mod-source-record-manager-server/src/test/java/org/folio/rest/impl/metadataProvider/MetadataProviderJobExecutionAPITest.java +++ b/mod-source-record-manager-server/src/test/java/org/folio/rest/impl/metadataProvider/MetadataProviderJobExecutionAPITest.java @@ -9,6 +9,7 @@ import io.vertx.ext.unit.TestContext; import io.vertx.ext.unit.junit.VertxUnitRunner; import org.apache.http.HttpStatus; +import 
org.folio.dao.IncomingRecordDaoImpl; import org.folio.dao.JournalRecordDaoImpl; import org.folio.dao.util.PostgresClientFactory; import org.folio.rest.impl.AbstractRestTest; @@ -16,6 +17,7 @@ import org.folio.rest.jaxrs.model.DeleteJobExecutionsReq; import org.folio.rest.jaxrs.model.DeleteJobExecutionsResp; import org.folio.rest.jaxrs.model.EntityType; +import org.folio.rest.jaxrs.model.IncomingRecord; import org.folio.rest.jaxrs.model.InitJobExecutionsRsDto; import org.folio.rest.jaxrs.model.JobExecution; import org.folio.rest.jaxrs.model.JobExecutionDto; @@ -96,6 +98,7 @@ public class MetadataProviderJobExecutionAPITest extends AbstractRestTest { private static final String GET_JOB_EXECUTION_SUMMARY_PATH = "/metadata-provider/jobSummary"; private static final String GET_JOB_EXECUTION_JOB_PROFILES_PATH = "/metadata-provider/jobExecutions/jobProfiles"; private static final String GET_UNIQUE_USERS_INFO = "/metadata-provider/jobExecutions/users"; + private static final String GET_INCOMING_RECORDS_BY_ID = "/metadata-provider/incomingRecords/"; private final JsonObject userResponse = new JsonObject() .put("users", @@ -109,6 +112,9 @@ public class MetadataProviderJobExecutionAPITest extends AbstractRestTest { @Spy @InjectMocks private JournalRecordDaoImpl journalRecordDao; + @Spy + @InjectMocks + private IncomingRecordDaoImpl incomingRecordDao; private AutoCloseable mocks; @Before @@ -1748,4 +1754,39 @@ public void shouldNotReturnUsersForParentJobExecutions() { .body("totalRecords", is(0)); } + @Test + public void shouldReturnNotFoundIncomingRecordById() { + RestAssured.given() + .spec(spec) + .when() + .get(GET_INCOMING_RECORDS_BY_ID + UUID.randomUUID()) + .then() + .statusCode(HttpStatus.SC_NOT_FOUND); + } + + @Test + public void shouldReturnIncomingRecordById(TestContext context) { + Async async = context.async(); + List createdJobExecutions = constructAndPostInitJobExecutionRqDto(1).getJobExecutions(); + JobExecution jobExecution = createdJobExecutions.get(0); + String jobExecutionId = jobExecution.getId(); + String id = UUID.randomUUID().toString(); + + IncomingRecord incomingRecord = new IncomingRecord() + .withId(id).withJobExecutionId(jobExecutionId).withRecordType(IncomingRecord.RecordType.MARC_BIB).withOrder(0) + .withRawRecordContent("rawRecord").withParsedRecordContent("parsedRecord"); + + incomingRecordDao.saveBatch(List.of(incomingRecord), TENANT_ID) + .onComplete(v -> { + RestAssured.given() + .spec(spec) + .when() + .get(GET_INCOMING_RECORDS_BY_ID + id) + .then() + .statusCode(HttpStatus.SC_OK) + .body("id", is(id)) + .body("jobExecutionId", is(jobExecutionId)); + async.complete(); + }); + } } diff --git a/mod-source-record-manager-server/src/test/java/org/folio/services/ChangeEngineServiceImplTest.java b/mod-source-record-manager-server/src/test/java/org/folio/services/ChangeEngineServiceImplTest.java index f11a39c3c..e891d3bec 100644 --- a/mod-source-record-manager-server/src/test/java/org/folio/services/ChangeEngineServiceImplTest.java +++ b/mod-source-record-manager-server/src/test/java/org/folio/services/ChangeEngineServiceImplTest.java @@ -3,6 +3,7 @@ import static org.folio.rest.jaxrs.model.ActionProfile.Action.CREATE; import static org.folio.rest.jaxrs.model.ActionProfile.FolioRecord.AUTHORITY; import static org.folio.rest.jaxrs.model.ActionProfile.FolioRecord.MARC_AUTHORITY; +import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_INCOMING_MARC_BIB_RECORD_PARSED; import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_MARC_FOR_UPDATE_RECEIVED; import 
static org.folio.rest.jaxrs.model.ProfileSnapshotWrapper.ContentType.ACTION_PROFILE; import static org.folio.rest.jaxrs.model.ProfileSnapshotWrapper.ContentType.JOB_PROFILE; @@ -22,6 +23,7 @@ import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.when; @@ -31,12 +33,15 @@ import io.vertx.core.json.Json; import io.vertx.core.json.JsonObject; import io.vertx.kafka.client.producer.KafkaHeader; + +import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Optional; import java.util.UUID; import org.folio.MatchProfile; +import org.folio.TestUtil; import org.folio.dao.JobExecutionSourceChunkDao; import org.folio.dataimport.util.OkapiConnectionParams; import org.folio.dataimport.util.marc.MarcRecordAnalyzer; @@ -82,6 +87,7 @@ public class ChangeEngineServiceImplTest { "01119cam a2200349Li 4500001001300000003000600013005001700019008004100036020001800077020001500095035002100110037002200131040002700153043001200180050002700192082001600219090002200235100003300257245002700290264003800317300002300355336002600378337002800404338002700432651006400459945004300523960006200566961001600628980003900644981002300683999006300706\u001Eocn922152790\u001EOCoLC\u001E20150927051630.4\u001E150713s2015 enk 000 f eng d\u001E \u001Fa9780241146064\u001E \u001Fa0241146062\u001E \u001Fa(OCoLC)922152790\u001E \u001Fa12370236\u001Fbybp\u001F5NU\u001E \u001FaYDXCP\u001Fbeng\u001Ferda\u001FcYDXCP\u001E \u001Fae-uk-en\u001E 4\u001FaPR6052.A6488\u001FbN66 2015\u001E04\u001Fa823.914\u001F223\u001E 4\u001Fa823.914\u001FcB2557\u001Fb1\u001E1 \u001FaBarker, Pat,\u001Fd1943-\u001Feauthor.\u001E10\u001FaNoonday /\u001FcPat Barker.\u001E 1\u001FaLondon :\u001FbHamish Hamilton,\u001Fc2015.\u001E \u001Fa258 pages ;\u001Fc24 cm\u001E \u001Fatext\u001Fbtxt\u001F2rdacontent\u001E \u001Faunmediated\u001Fbn\u001F2rdamedia\u001E \u001Favolume\u001Fbnc\u001F2rdacarrier\u001E 0\u001FaLondon (England)\u001FxHistory\u001FyBombardment, 1940-1941\u001FvFiction.\u001E \u001Ffh\u001Fg1\u001Fi0000000618391828\u001Flfhgen\u001Fr3\u001Fsv\u001Ft1\u001E \u001Fap\u001Fda\u001Fgh\u001Fim\u001Fjn\u001Fka\u001Fla\u001Fmo\u001Ftfhgen\u001Fo1\u001Fs15.57\u001Fu7ART\u001Fvukapf\u001FzGBP\u001E \u001FbGBP\u001Fm633761\u001E \u001Fa160128\u001Fb1899\u001Fd156\u001Fe1713\u001Ff654270\u001Fg1\u001E \u001Faukapf\u001Fb7ART\u001Fcfhgen\u001E \u001Fdm\u001Fea\u001Ffx\u001Fgeng\u001FiTesting with subfield i\u001FsAnd with subfield s\u001E\u001D"; private static final String MARC_BIB_REC_WITH_FF = "00861cam a2200193S1 45 0001000700000002000900007003000400016008004100020035002200061035001300083099001600096245005600112500011600168500019600284600003500480610003400515610003900549999007900588\u001E304162\u001E00320061\u001EPBL\u001E020613n 000 0 eng u\u001E \u001Fa(Sirsi)sc99900001\u001E \u001Fa(Sirsi)1\u001E \u001FaSC LVF M698\u001E00\u001FaMohler, Harold S. (Lehigh Collection Vertical File)\u001E \u001FaMaterial on this topic is contained in the Lehigh Collection Vertical File. See Special Collections for access.\u001E \u001FaContains press releases, versions of resumes, clippings, biographical information. L-in-Life program, and memorial service program -- Documents related Hershey Food Corporation. 
In two parts.\u001E10\u001FaMohler, Harold S.,\u001Fd1919-1988.\u001E20\u001FaLehigh University.\u001FbTrustees.\u001E20\u001FaLehigh University.\u001FbClass of 1948.\u001Eff\u001Fi29573076-a7ee-462a-8f9b-2659ab7df23c\u001Fs7ca42730-9ba6-4bc8-98d3-f068728504c9\u001E\u001D"; + private static final String RAW_EDIFACT_RECORD_PATH = "src/test/resources/records/edifact/565751us20210122.edi"; @Mock private JobExecutionSourceChunkDao jobExecutionSourceChunkDao; @@ -101,6 +107,10 @@ public class ChangeEngineServiceImplTest { private JobProfileSnapshotValidationService jobProfileSnapshotValidationService; @Mock private FieldModificationService fieldModificationService; + @Mock + private IncomingRecordService incomingRecordService; + @Mock + private JournalRecordService journalRecordService; @Captor private ArgumentCaptor> kafkaHeadersCaptor; @@ -328,6 +338,8 @@ public void shouldReturnMarcBibRecord() { when(jobExecutionSourceChunkDao.getById(any(), any())) .thenReturn(Future.succeededFuture(Optional.of(new JobExecutionSourceChunk()))); when(jobExecutionSourceChunkDao.update(any(), any())).thenReturn(Future.succeededFuture(new JobExecutionSourceChunk())); + when(recordsPublishingService.sendEventsWithRecords(any(), any(), any(), any())) + .thenReturn(Future.succeededFuture()); Future> serviceFuture = executeWithKafkaMock(rawRecordsDto, jobExecution, Future.succeededFuture(true)); @@ -338,8 +350,34 @@ public void shouldReturnMarcBibRecord() { } @Test - public void shouldReturnMarcBibRecordWith999ByAcceptInstanceId() { + public void shouldReturnEdifactRecord() throws IOException { + RawRecordsDto rawRecordsDto = new RawRecordsDto() + .withId(UUID.randomUUID().toString()) + .withRecordsMetadata(new RecordsMetadata().withContentType(RecordsMetadata.ContentType.EDIFACT_RAW)) + .withInitialRecords(Collections.singletonList(new InitialRecord().withRecord(TestUtil.readFileFromPath(RAW_EDIFACT_RECORD_PATH)))); + JobExecution jobExecution = new JobExecution() + .withId(UUID.randomUUID().toString()) + .withUserId(UUID.randomUUID().toString()) + .withJobProfileSnapshotWrapper(new ProfileSnapshotWrapper()) + .withJobProfileInfo(new JobProfileInfo().withId(UUID.randomUUID().toString()) + .withName("test").withDataType(JobProfileInfo.DataType.EDIFACT)); + + when(jobExecutionSourceChunkDao.getById(any(), any())) + .thenReturn(Future.succeededFuture(Optional.of(new JobExecutionSourceChunk()))); + when(jobExecutionSourceChunkDao.update(any(), any())).thenReturn(Future.succeededFuture(new JobExecutionSourceChunk())); + when(recordsPublishingService.sendEventsWithRecords(any(), any(), any(), any())) + .thenReturn(Future.succeededFuture()); + Future> serviceFuture = executeWithKafkaMock(rawRecordsDto, jobExecution, Future.succeededFuture(true)); + + var actual = serviceFuture.result(); + assertThat(actual, hasSize(1)); + assertThat(actual.get(0).getRecordType(), equalTo(Record.RecordType.EDIFACT)); + assertThat(actual.get(0).getErrorRecord(), nullValue()); + } + + @Test + public void shouldReturnMarcBibRecordWith999ByAcceptInstanceId() { RawRecordsDto rawRecordsDto = getTestRawRecordsDto(MARC_BIB_REC_WITH_FF); JobExecution jobExecution = new JobExecution() .withId(UUID.randomUUID().toString()) @@ -348,15 +386,15 @@ public void shouldReturnMarcBibRecordWith999ByAcceptInstanceId() { .withJobProfileInfo(new JobProfileInfo().withId(UUID.randomUUID().toString()) .withName("test").withDataType(JobProfileInfo.DataType.MARC)); - boolean acceptInstanceId = true; - 
when(marcRecordAnalyzer.process(any())).thenReturn(MarcRecordType.BIB); when(jobExecutionSourceChunkDao.getById(any(), any())) .thenReturn(Future.succeededFuture(Optional.of(new JobExecutionSourceChunk()))); when(jobExecutionSourceChunkDao.update(any(), any())).thenReturn(Future.succeededFuture(new JobExecutionSourceChunk())); + when(recordsPublishingService.sendEventsWithRecords(any(), any(), any(), any())) + .thenReturn(Future.succeededFuture()); Future> serviceFuture = - executeWithKafkaMock(rawRecordsDto, jobExecution, Future.succeededFuture(true), acceptInstanceId); + executeWithKafkaMock(rawRecordsDto, jobExecution, Future.succeededFuture(true), true); var actual = serviceFuture.result(); assertThat(actual, hasSize(1)); @@ -375,6 +413,8 @@ public void shouldReturnMarcBibRecordWithIds() { when(jobExecutionSourceChunkDao.getById(any(), any())) .thenReturn(Future.succeededFuture(Optional.of(new JobExecutionSourceChunk()))); when(jobExecutionSourceChunkDao.update(any(), any())).thenReturn(Future.succeededFuture(new JobExecutionSourceChunk())); + when(recordsPublishingService.sendEventsWithRecords(any(), any(), any(), any())) + .thenReturn(Future.succeededFuture()); Future> serviceFuture = executeWithKafkaMock(rawRecordsDto, jobExecution, Future.succeededFuture(true)); @@ -449,7 +489,6 @@ public void shouldNotUpdateIfRecordTypeIsNotMarcBib() { .thenReturn(Future.succeededFuture(Optional.of(new JobExecutionSourceChunk()))); when(jobExecutionSourceChunkDao.update(any(), any())).thenReturn(Future.succeededFuture(new JobExecutionSourceChunk())); - try (var mockedStatic = Mockito.mockStatic(EventHandlingUtil.class)) { mockedStatic.when(() -> EventHandlingUtil.sendEventToKafka(any(), any(), any(), kafkaHeadersCaptor.capture(), any(), any())) .thenReturn(Future.succeededFuture(true)); @@ -479,7 +518,7 @@ public void shouldNotUpdateIfCreateInstanceActionExist() { service.parseRawRecordsChunkForJobExecution(rawRecordsDto, jobExecution, "1", false, okapiConnectionParams).result(); } - verify(recordsPublishingService, never()).sendEventsWithRecords(any(), any(), any(), any()); + verify(recordsPublishingService, times(1)).sendEventsWithRecords(any(), any(), any(), eq(DI_INCOMING_MARC_BIB_RECORD_PARSED.value())); } @Test @@ -578,7 +617,6 @@ private void mockServicesForParseRawRecordsChunkForJobExecution() { when(jobExecutionSourceChunkDao.getById(any(), any())) .thenReturn(Future.succeededFuture(Optional.of(new JobExecutionSourceChunk()))); when(jobExecutionSourceChunkDao.update(any(), any())).thenReturn(Future.succeededFuture(new JobExecutionSourceChunk())); - } ProfileSnapshotWrapper constructCreateInstanceSnapshotWrapper() { @@ -595,7 +633,7 @@ ProfileSnapshotWrapper constructCreateInstanceSnapshotWrapper() { .withAction(CREATE) .withFolioRecord(ActionProfile.FolioRecord.INSTANCE))).getMap()) )); - }; + } private ProfileSnapshotWrapper constructCreateMarcHoldingsAndInstanceSnapshotWrapper() { return new ProfileSnapshotWrapper() diff --git a/mod-source-record-manager-server/src/test/java/org/folio/services/EventDrivenChunkProcessingServiceImplTest.java b/mod-source-record-manager-server/src/test/java/org/folio/services/EventDrivenChunkProcessingServiceImplTest.java index 4bf96108c..8ea0deae5 100644 --- a/mod-source-record-manager-server/src/test/java/org/folio/services/EventDrivenChunkProcessingServiceImplTest.java +++ b/mod-source-record-manager-server/src/test/java/org/folio/services/EventDrivenChunkProcessingServiceImplTest.java @@ -9,6 +9,7 @@ import static 
com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; import static com.github.tomakehurst.wiremock.client.WireMock.verify; +import static java.util.Collections.emptyList; import static org.folio.dataimport.util.RestUtil.OKAPI_URL_HEADER; import static org.folio.rest.jaxrs.model.StatusDto.Status.PARSING_IN_PROGRESS; import static org.folio.rest.util.OkapiConnectionParams.OKAPI_TENANT_HEADER; @@ -43,6 +44,7 @@ import org.folio.dao.JobExecutionProgressDaoImpl; import org.folio.dao.JobExecutionSourceChunkDaoImpl; import org.folio.dao.JournalRecordDaoImpl; +import org.folio.dao.IncomingRecordDaoImpl; import org.folio.dao.MappingParamsSnapshotDaoImpl; import org.folio.dao.MappingRuleDaoImpl; import org.folio.dao.MappingRulesSnapshotDaoImpl; @@ -112,6 +114,12 @@ public class EventDrivenChunkProcessingServiceImplTest extends AbstractRestTest private JobExecutionServiceImpl jobExecutionService; @InjectMocks @Spy + private IncomingRecordServiceImpl incomingRecordService; + @InjectMocks + @Spy + private IncomingRecordDaoImpl incomingRecordDao; + @InjectMocks + @Spy private JournalRecordServiceImpl journalRecordService; @InjectMocks @Spy @@ -134,8 +142,6 @@ public class EventDrivenChunkProcessingServiceImplTest extends AbstractRestTest @InjectMocks private MappingParamsSnapshotDaoImpl mappingParamsSnapshotDao; @Spy - private RecordsPublishingService recordsPublishingService; - @Spy @InjectMocks private FieldModificationServiceImpl fieldModificationService; @@ -183,12 +189,15 @@ public void setUp() throws IOException { mappingParametersProvider = when(mock(MappingParametersProvider.class).get(anyString(), any(OkapiConnectionParams.class))).thenReturn(Future.succeededFuture(new MappingParameters())).getMock(); mappingMetadataService = new MappingMetadataServiceImpl(mappingParametersProvider, mappingRuleService, mappingRulesSnapshotDao, mappingParamsSnapshotDao); - JobProfileSnapshotValidationServiceImpl jobProfileSnapshotValidationService = new JobProfileSnapshotValidationServiceImpl(); + + RecordsPublishingService recordsPublishingService = new RecordsPublishingServiceImpl(jobExecutionService, + new DataImportPayloadContextBuilderImpl(marcRecordAnalyzer), kafkaConfig, emptyList()); changeEngineService = new ChangeEngineServiceImpl(jobExecutionSourceChunkDao, jobExecutionService, marcRecordAnalyzer, - hrIdFieldService, recordsPublishingService, mappingMetadataService, jobProfileSnapshotValidationService, kafkaConfig, - fieldModificationService); + hrIdFieldService, recordsPublishingService, mappingMetadataService, new JobProfileSnapshotValidationServiceImpl(), kafkaConfig, + fieldModificationService, incomingRecordService, journalRecordService); ReflectionTestUtils.setField(changeEngineService, "maxDistributionNum", 10); ReflectionTestUtils.setField(changeEngineService, "batchSize", 100); + ReflectionTestUtils.setField(recordsPublishingService, "maxDistributionNum", 100); chunkProcessingService = new EventDrivenChunkProcessingServiceImpl(jobExecutionSourceChunkDao, jobExecutionService, changeEngineService, jobExecutionProgressService); HashMap headers = new HashMap<>(); diff --git a/mod-source-record-manager-server/src/test/java/org/folio/services/IncomingRecordServiceImplUnitTest.java b/mod-source-record-manager-server/src/test/java/org/folio/services/IncomingRecordServiceImplUnitTest.java new file mode 100644 index 000000000..9118b25f8 --- /dev/null +++ 
b/mod-source-record-manager-server/src/test/java/org/folio/services/IncomingRecordServiceImplUnitTest.java @@ -0,0 +1,40 @@ +package org.folio.services; + +import org.folio.dao.IncomingRecordDao; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.mockito.junit.MockitoJUnitRunner; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.verify; + +@RunWith(MockitoJUnitRunner.class) +public class IncomingRecordServiceImplUnitTest { + + @Mock + private IncomingRecordDao incomingRecordDao; + + @InjectMocks + private IncomingRecordService incomingRecordService = new IncomingRecordServiceImpl(); + + @Before + public void setUp() { + MockitoAnnotations.openMocks(this); + } + + @Test + public void shouldGetById() { + incomingRecordService.getById(any(), any()); + verify(incomingRecordDao).getById(any(), any()); + } + + @Test + public void shouldSaveBatch() { + incomingRecordService.saveBatch(any(), any()); + verify(incomingRecordDao).saveBatch(any(), any()); + } +} diff --git a/mod-source-record-manager-server/src/test/java/org/folio/services/JournalUtilTest.java b/mod-source-record-manager-server/src/test/java/org/folio/services/JournalUtilTest.java index 6aa4e30cd..8a85b4654 100644 --- a/mod-source-record-manager-server/src/test/java/org/folio/services/JournalUtilTest.java +++ b/mod-source-record-manager-server/src/test/java/org/folio/services/JournalUtilTest.java @@ -6,7 +6,11 @@ import io.vertx.ext.unit.junit.VertxUnitRunner; import org.folio.DataImportEventPayload; import org.folio.Record; +import org.folio.rest.jaxrs.model.ErrorRecord; import org.folio.rest.jaxrs.model.JournalRecord; +import org.folio.rest.jaxrs.model.IncomingRecord; +import org.folio.rest.jaxrs.model.ParsedRecord; +import org.folio.rest.jaxrs.model.RawRecord; import org.folio.services.journal.JournalRecordMapperException; import org.folio.services.journal.JournalUtil; import org.junit.Assert; @@ -18,6 +22,7 @@ import java.util.List; import java.util.UUID; +import static org.assertj.core.api.Assertions.assertThat; import static org.folio.DataImportEventTypes.DI_ERROR; import static org.folio.DataImportEventTypes.DI_SRS_MARC_BIB_RECORD_UPDATED; import static org.folio.rest.jaxrs.model.JournalRecord.ActionStatus.COMPLETED; @@ -38,6 +43,83 @@ public class JournalUtilTest { private static final String CENTRAL_TENANT_ID_KEY = "CENTRAL_TENANT_ID"; + private static final String CURRENT_EVENT_TYPE = "CURRENT_EVENT_TYPE"; + + @Test + public void shouldBuildJournalRecordsByRecordsWithoutError() { + String recordId = UUID.randomUUID().toString(); + String snapshotId = UUID.randomUUID().toString(); + + org.folio.rest.jaxrs.model.Record record = new org.folio.rest.jaxrs.model.Record() + .withId(recordId) + .withSnapshotId(snapshotId) + .withOrder(0) + .withRecordType(org.folio.rest.jaxrs.model.Record.RecordType.MARC_BIB); + + List journalRecords = JournalUtil.buildJournalRecordsByRecords(List.of(record)); + + assertThat(journalRecords).hasSize(1); + assertThat(journalRecords.get(0).getId()).isNotBlank(); + assertThat(journalRecords.get(0).getJobExecutionId()).isEqualTo(snapshotId); + assertThat(journalRecords.get(0).getSourceId()).isEqualTo(recordId); + assertThat(journalRecords.get(0).getSourceRecordOrder()).isEqualTo(record.getOrder()); + assertThat(journalRecords.get(0).getActionType()).isEqualTo(JournalRecord.ActionType.PARSE); + 
assertThat(journalRecords.get(0).getActionDate()).isNotNull(); + assertThat(journalRecords.get(0).getActionStatus()).isEqualTo(JournalRecord.ActionStatus.COMPLETED); + assertThat(journalRecords.get(0).getEntityType()).isNull(); + assertThat(journalRecords.get(0).getError()).isNull(); + } + + @Test + public void shouldBuildJournalRecordsByRecordsWithError() { + String recordId = UUID.randomUUID().toString(); + String snapshotId = UUID.randomUUID().toString(); + + ErrorRecord errorRecord = new ErrorRecord().withDescription("error"); + org.folio.rest.jaxrs.model.Record record = new org.folio.rest.jaxrs.model.Record() + .withId(recordId) + .withSnapshotId(snapshotId) + .withOrder(0) + .withRecordType(org.folio.rest.jaxrs.model.Record.RecordType.MARC_BIB) + .withErrorRecord(errorRecord); + + List journalRecords = JournalUtil.buildJournalRecordsByRecords(List.of(record)); + + assertThat(journalRecords).hasSize(1); + assertThat(journalRecords.get(0).getId()).isNotBlank(); + assertThat(journalRecords.get(0).getJobExecutionId()).isEqualTo(snapshotId); + assertThat(journalRecords.get(0).getSourceId()).isEqualTo(recordId); + assertThat(journalRecords.get(0).getSourceRecordOrder()).isEqualTo(record.getOrder()); + assertThat(journalRecords.get(0).getActionType()).isEqualTo(JournalRecord.ActionType.PARSE); + assertThat(journalRecords.get(0).getActionDate()).isNotNull(); + assertThat(journalRecords.get(0).getActionStatus()).isEqualTo(ERROR); + assertThat(journalRecords.get(0).getEntityType()).isNull(); + assertThat(journalRecords.get(0).getError()).isEqualTo(errorRecord.getDescription()); + } + + @Test + public void shouldBuildIncomingRecordsByRecords() { + String recordId = UUID.randomUUID().toString(); + String snapshotId = UUID.randomUUID().toString(); + + org.folio.rest.jaxrs.model.Record record = new org.folio.rest.jaxrs.model.Record() + .withId(recordId) + .withSnapshotId(snapshotId) + .withOrder(0) + .withRawRecord(new RawRecord().withContent("rawRecord")) + .withRecordType(org.folio.rest.jaxrs.model.Record.RecordType.MARC_BIB) + .withParsedRecord(new ParsedRecord().withContent("parsedRecord")); + + List incomingRecords = JournalUtil.buildIncomingRecordsByRecords(List.of(record)); + + assertThat(incomingRecords).hasSize(1); + assertThat(incomingRecords.get(0).getId()).isEqualTo(record.getId()); + assertThat(incomingRecords.get(0).getJobExecutionId()).isEqualTo(snapshotId); + assertThat(incomingRecords.get(0).getOrder()).isEqualTo(record.getOrder()); + assertThat(incomingRecords.get(0).getRawRecordContent()).isEqualTo("rawRecord"); + assertThat(incomingRecords.get(0).getRecordType()).isEqualTo(IncomingRecord.RecordType.MARC_BIB); + assertThat(incomingRecords.get(0).getParsedRecordContent()).isEqualTo("parsedRecord"); + } @Test public void shouldBuildJournalRecordForInstance() throws JournalRecordMapperException { @@ -79,6 +161,48 @@ public void shouldBuildJournalRecordForInstance() throws JournalRecordMapperExce Assert.assertNotNull(journalRecord.get(0).getActionDate()); } + @Test + public void shouldBuildTwoJournalRecordWithInstanceCreatedEvent() throws JournalRecordMapperException { + String instanceId = UUID.randomUUID().toString(); + String instanceHrid = UUID.randomUUID().toString(); + + JsonObject instanceJson = new JsonObject() + .put("id", instanceId) + .put("hrid", instanceHrid); + + String recordId = UUID.randomUUID().toString(); + String snapshotId = UUID.randomUUID().toString(); + + JsonObject recordJson = new JsonObject() + .put("id", recordId) + .put("snapshotId", snapshotId) + 
.put("order", 1); + + HashMap context = new HashMap<>(); + context.put(INSTANCE.value(), instanceJson.encode()); + context.put(MARC_BIBLIOGRAPHIC.value(), recordJson.encode()); + context.put(CURRENT_EVENT_TYPE, "DI_INVENTORY_INSTANCE_CREATED"); + + DataImportEventPayload eventPayload = new DataImportEventPayload() + .withEventType("DI_COMPLETED") + .withContext(context); + + List journalRecord = JournalUtil.buildJournalRecordsByEvent(eventPayload, + CREATE, INSTANCE, COMPLETED); + + Assert.assertNotNull(journalRecord); + Assert.assertEquals(2, journalRecord.size()); + Assert.assertEquals(snapshotId, journalRecord.get(0).getJobExecutionId()); + Assert.assertEquals(recordId, journalRecord.get(0).getSourceId()); + Assert.assertEquals(1, journalRecord.get(0).getSourceRecordOrder().intValue()); + Assert.assertEquals(INSTANCE, journalRecord.get(0).getEntityType()); + Assert.assertEquals(instanceId, journalRecord.get(0).getEntityId()); + Assert.assertEquals(instanceHrid, journalRecord.get(0).getEntityHrId()); + Assert.assertEquals(CREATE, journalRecord.get(0).getActionType()); + Assert.assertEquals(COMPLETED, journalRecord.get(0).getActionStatus()); + Assert.assertNotNull(journalRecord.get(0).getActionDate()); + } + @Test public void shouldBuildJournalRecordForAuthority() throws JournalRecordMapperException { String authorityId = UUID.randomUUID().toString(); diff --git a/mod-source-record-manager-server/src/test/java/org/folio/services/RecordProcessedEventHandlingServiceImplTest.java b/mod-source-record-manager-server/src/test/java/org/folio/services/RecordProcessedEventHandlingServiceImplTest.java index 27295edec..9df4c1c83 100644 --- a/mod-source-record-manager-server/src/test/java/org/folio/services/RecordProcessedEventHandlingServiceImplTest.java +++ b/mod-source-record-manager-server/src/test/java/org/folio/services/RecordProcessedEventHandlingServiceImplTest.java @@ -6,6 +6,7 @@ import static com.github.tomakehurst.wiremock.client.WireMock.post; import static com.github.tomakehurst.wiremock.client.WireMock.putRequestedFor; import static com.github.tomakehurst.wiremock.client.WireMock.verify; +import static java.util.Collections.emptyList; import static org.folio.dataimport.util.RestUtil.OKAPI_URL_HEADER; import static org.folio.rest.jaxrs.model.JobExecution.Status.COMMITTED; import static org.folio.rest.jaxrs.model.JobExecution.Status.ERROR; @@ -42,6 +43,7 @@ import org.folio.dao.JobExecutionProgressDaoImpl; import org.folio.dao.JobExecutionSourceChunkDaoImpl; import org.folio.dao.JournalRecordDaoImpl; +import org.folio.dao.IncomingRecordDaoImpl; import org.folio.dao.MappingParamsSnapshotDaoImpl; import org.folio.dao.MappingRuleDaoImpl; import org.folio.dao.MappingRulesSnapshotDaoImpl; @@ -136,16 +138,18 @@ public class RecordProcessedEventHandlingServiceImplTest extends AbstractRestTes @Spy @InjectMocks private FieldModificationServiceImpl fieldModificationService; - @Spy - RecordsPublishingService recordsPublishingService; - private MappingRuleCache mappingRuleCache; - private ChangeEngineService changeEngineService; + @InjectMocks + private IncomingRecordServiceImpl incomingRecordService; + @Spy + @InjectMocks + private JournalRecordServiceImpl journalRecordService; + @Spy + @InjectMocks + private IncomingRecordDaoImpl incomingRecordDao; private ChunkProcessingService chunkProcessingService; private RecordProcessedEventHandlingServiceImpl recordProcessedEventHandlingService; private OkapiConnectionParams params; - private MappingMetadataService mappingMetadataService; - private 
KafkaConfig kafkaConfig; private InitJobExecutionsRqDto initJobExecutionsRqDto = new InitJobExecutionsRqDto() .withFiles(Collections.singletonList(new File().withName("importBib1.bib"))) @@ -166,17 +170,10 @@ public class RecordProcessedEventHandlingServiceImplTest extends AbstractRestTes .withId(jobProfile.getId()) .withDataType(DataType.MARC); - private final JsonObject userResponse = new JsonObject() - .put("users", - new JsonArray().add(new JsonObject() - .put("username", "diku_admin") - .put("personal", new JsonObject().put("firstName", "DIKU").put("lastName", "ADMINISTRATOR")))) - .put("totalRecords", 1); - @Before public void setUp() throws IOException { String[] hostAndPort = kafkaCluster.getBrokerList().split(":"); - kafkaConfig = KafkaConfig.builder() + KafkaConfig kafkaConfig = KafkaConfig.builder() .kafkaHost(hostAndPort[0]) .kafkaPort(hostAndPort[1]) .envId(KAFKA_ENV_ID) @@ -185,17 +182,21 @@ public void setUp() throws IOException { MockitoAnnotations.openMocks(this); - mappingRuleCache = new MappingRuleCache(mappingRuleDao, vertx); + MappingRuleCache mappingRuleCache = new MappingRuleCache(mappingRuleDao, vertx); marcRecordAnalyzer = new MarcRecordAnalyzer(); mappingRuleService = new MappingRuleServiceImpl(mappingRuleDao, mappingRuleCache); mappingRuleDao = when(mock(MappingRuleDaoImpl.class).get(any(), anyString())).thenReturn(Future.succeededFuture(Optional.of(new JsonObject(rules)))).getMock(); mappingParametersProvider = when(mock(MappingParametersProvider.class).get(anyString(), any(OkapiConnectionParams.class))).thenReturn(Future.succeededFuture(new MappingParameters())).getMock(); - mappingMetadataService = new MappingMetadataServiceImpl(mappingParametersProvider, mappingRuleService, mappingRulesSnapshotDao, mappingParamsSnapshotDao); + MappingMetadataService mappingMetadataService = new MappingMetadataServiceImpl(mappingParametersProvider, mappingRuleService, mappingRulesSnapshotDao, mappingParamsSnapshotDao); JobProfileSnapshotValidationServiceImpl jobProfileSnapshotValidationService = new JobProfileSnapshotValidationServiceImpl(); - changeEngineService = new ChangeEngineServiceImpl(jobExecutionSourceChunkDao, jobExecutionService, marcRecordAnalyzer, hrIdFieldService , recordsPublishingService, mappingMetadataService, jobProfileSnapshotValidationService, kafkaConfig, - fieldModificationService); + RecordsPublishingService recordsPublishingService = new RecordsPublishingServiceImpl(jobExecutionService, + new DataImportPayloadContextBuilderImpl(marcRecordAnalyzer), kafkaConfig, emptyList()); + ChangeEngineService changeEngineService = new ChangeEngineServiceImpl(jobExecutionSourceChunkDao, jobExecutionService, marcRecordAnalyzer, + hrIdFieldService, recordsPublishingService, mappingMetadataService, jobProfileSnapshotValidationService, kafkaConfig, fieldModificationService, + incomingRecordService, journalRecordService); ReflectionTestUtils.setField(changeEngineService, "maxDistributionNum", 10); ReflectionTestUtils.setField(changeEngineService, "batchSize", 100); + ReflectionTestUtils.setField(recordsPublishingService, "maxDistributionNum", 100); chunkProcessingService = new EventDrivenChunkProcessingServiceImpl(jobExecutionSourceChunkDao, jobExecutionService, changeEngineService, jobExecutionProgressService); recordProcessedEventHandlingService = new RecordProcessedEventHandlingServiceImpl(jobExecutionProgressService, jobExecutionService); HashMap headers = new HashMap<>(); diff --git 
a/mod-source-record-manager-server/src/test/java/org/folio/verticle/consumers/DataImportJournalConsumerVerticleTest.java b/mod-source-record-manager-server/src/test/java/org/folio/verticle/consumers/DataImportJournalConsumerVerticleTest.java index 73028baba..921f1cbec 100644 --- a/mod-source-record-manager-server/src/test/java/org/folio/verticle/consumers/DataImportJournalConsumerVerticleTest.java +++ b/mod-source-record-manager-server/src/test/java/org/folio/verticle/consumers/DataImportJournalConsumerVerticleTest.java @@ -9,7 +9,6 @@ import io.vertx.kafka.client.consumer.KafkaConsumerRecord; import io.vertx.kafka.client.consumer.impl.KafkaConsumerRecordImpl; import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.common.header.internals.RecordHeader; import org.folio.ActionProfile; import org.folio.DataImportEventPayload; import org.folio.dao.JobExecutionDaoImpl; @@ -30,11 +29,9 @@ import org.junit.Test; import org.junit.runner.RunWith; -import java.io.IOException; import java.util.HashMap; import java.util.List; import java.util.UUID; -import java.util.concurrent.ExecutionException; import static org.folio.kafka.KafkaTopicNameHelper.getDefaultNameSpace; import static org.folio.rest.jaxrs.model.DataImportEventTypes.*; @@ -127,7 +124,7 @@ public void testJournalInventoryInstanceCreatedAction(TestContext context) { } @Test - public void testJournalMarcBibRecordUpdatedAction(TestContext context) throws IOException { + public void testJournalMarcBibRecordUpdatedAction(TestContext context) { Async async = context.async(); // given @@ -155,7 +152,7 @@ public void testJournalMarcBibRecordUpdatedAction(TestContext context) throws IO } @Test - public void testJournalMarcHoldingsRecordCreatedAction(TestContext context) throws IOException { + public void testJournalMarcHoldingsRecordCreatedAction(TestContext context) { Async async = context.async(); // given @@ -183,7 +180,7 @@ public void testJournalMarcHoldingsRecordCreatedAction(TestContext context) thro } @Test - public void testJournalCompletedAction(TestContext context) throws IOException, ExecutionException, InterruptedException { + public void testJournalCompletedAction(TestContext context) { Async async = context.async(); // given @@ -222,7 +219,7 @@ public void testJournalCompletedAction(TestContext context) throws IOException, } @Test - public void testJournalErrorAction(TestContext context) throws IOException, ExecutionException, InterruptedException { + public void testJournalErrorAction(TestContext context) { Async async = context.async(); // given @@ -244,7 +241,7 @@ public void testJournalErrorAction(TestContext context) throws IOException, Exec .withId(UUID.randomUUID().toString()) .withContentType(ACTION_PROFILE) .withContent(JsonObject.mapFrom(new ActionProfile().withFolioRecord(ActionProfile.FolioRecord.HOLDINGS)))) - .withEventsChain(List.of(DI_SRS_MARC_BIB_RECORD_CREATED.value(), DI_INVENTORY_HOLDING_CREATED.value())); + .withEventsChain(List.of(DI_INCOMING_MARC_BIB_RECORD_PARSED.value(), DI_INVENTORY_HOLDING_CREATED.value())); // when KafkaConsumerRecord kafkaConsumerRecord = buildKafkaConsumerRecord(eventPayload); @@ -260,7 +257,7 @@ public void testJournalErrorAction(TestContext context) throws IOException, Exec } @Test - public void testJournalRecordMappingError(TestContext context) throws IOException, ExecutionException, InterruptedException { + public void testJournalRecordMappingError(TestContext context) { Async async = context.async(); // given diff --git 
a/mod-source-record-manager-server/src/test/java/org/folio/verticle/consumers/RawMarcChunkConsumersVerticleTest.java b/mod-source-record-manager-server/src/test/java/org/folio/verticle/consumers/RawMarcChunkConsumersVerticleTest.java index 3c9e2dca2..ec77224a8 100644 --- a/mod-source-record-manager-server/src/test/java/org/folio/verticle/consumers/RawMarcChunkConsumersVerticleTest.java +++ b/mod-source-record-manager-server/src/test/java/org/folio/verticle/consumers/RawMarcChunkConsumersVerticleTest.java @@ -22,7 +22,6 @@ import org.folio.rest.jaxrs.model.DataImportEventPayload; import org.folio.rest.jaxrs.model.DataImportEventTypes; import org.folio.rest.jaxrs.model.EntityType; -import org.folio.rest.jaxrs.model.ErrorRecord; import org.folio.rest.jaxrs.model.Event; import org.folio.rest.jaxrs.model.InitJobExecutionsRsDto; import org.folio.rest.jaxrs.model.InitialRecord; @@ -34,7 +33,6 @@ import org.folio.rest.jaxrs.model.ProfileSnapshotWrapper; import org.folio.rest.jaxrs.model.RawRecordsDto; import org.folio.rest.jaxrs.model.Record; -import org.folio.rest.jaxrs.model.RecordCollection; import org.folio.rest.jaxrs.model.RecordsMetadata; import org.folio.verticle.consumers.errorhandlers.RawMarcChunksErrorHandler; import org.junit.Before; @@ -67,7 +65,7 @@ public class RawMarcChunkConsumersVerticleTest extends AbstractRestTest { private static final String RAW_RECORD_WITH_999_ff_field = "00948nam a2200241 a 4500001000800000003000400008005001700012008004100029035002100070035002000091040002300111041001300134100002300147245007900170260005800249300002400307440007100331650003600402650005500438650006900493655006500562999007900627\u001E1007048\u001EICU\u001E19950912000000.0\u001E891218s1983 wyu d 00010 eng d\u001E \u001Fa(ICU)BID12424550\u001E \u001Fa(OCoLC)16105467\u001E \u001FaPAU\u001FcPAU\u001Fdm/c\u001FdICU\u001E0 \u001Faeng\u001Faarp\u001E1 \u001FaSalzmann, Zdeněk\u001E10\u001FaDictionary of contemporary Arapaho usage /\u001Fccompiled by Zdeněk Salzmann.\u001E0 \u001FaWind River, Wyoming :\u001FbWind River Reservation,\u001Fc1983.\u001E \u001Fav, 231 p. ;\u001Fc28 cm.\u001E 0\u001FaArapaho language and culture instructional materials series\u001Fvno. 4\u001E 0\u001FaArapaho language\u001FxDictionaries.\u001E 0\u001FaIndians of North America\u001FxLanguages\u001FxDictionaries.\u001E 7\u001FaArapaho language.\u001F2fast\u001F0http://id.worldcat.org/fast/fst00812722\u001E 7\u001FaDictionaries.\u001F2fast\u001F0http://id.worldcat.org/fast/fst01423826\u001Eff\u001Fie27a5374-0857-462e-ac84-fb4795229c7a\u001Fse27a5374-0857-462e-ac84-fb4795229c7a\u001E\u001D"; - + private static final String CORRECT_RAW_RECORD = "01240cas a2200397 450000100070000000500170000700800410002401000170006502200140008203500260009603500220012203500110014403500190015504000440017405000150021808200110023322200420024424500430028626000470032926500380037630000150041431000220042932100250045136200230047657000290049965000330052865000450056165500420060670000450064885300180069386300230071190200160073490500210075094800370077195000340080836683220141106221425.0750907c19509999enkqr p 0 a0eng d a 58020553  a0022-0469 a(CStRLIN)NYCX1604275S a(NIC)notisABP6388 a366832 a(OCoLC)1604275 dCtYdMBTIdCtYdMBTIdNICdCStRLINdNIC0 aBR140b.J6 a270.0504aThe Journal of ecclesiastical history04aThe Journal of ecclesiastical history. aLondon,bCambridge University Press [etc.] a32 East 57th St., New York, 10022 av.b25 cm. aQuarterly,b1970- aSemiannual,b1950-690 av. 1- Apr. 1950- aEditor: C. W. Dugmore. 0aChurch historyxPeriodicals. 
7aChurch history2fast0(OCoLC)fst00860740 7aPeriodicals2fast0(OCoLC)fst014116411 aDugmore, C. W.q(Clifford William),eed.0381av.i(year)4081a1-49i1950-1998 apfndbLintz a19890510120000.02 a20141106bmdbatcheltsxaddfast lOLINaBR140b.J86h01/01/01 N01542ccm a2200361 "; private static final String INVALID_RECORD = "00557nam a22002053i 4500001001200000005001700012008004100029020001800070040002100088041000800109100001900117245004400136250001200180264001800192336002600210337002800236338002700264700001900291999004100310\u001E00000010150\u001E20230724074007.2\u001E230724|2020|||||||||||| |||||und||\u001E \u001Fa9788408232421\u001E\\\\\u001FaCC-ClU\u001Fbspa\u001Ferda\u001E\\\\\u001Faspa\u001E1 \u001FaChicot, Marcos\u001E00\u001FaEl asesinato de Platón / Chicot Marcos\u001E \u001FaPrimera\u001E 1\u001FbPlaneta\u001Fc2020\u001E \u001Fatext\u001Fbtxt\u001F2rdacontent\u001E \u001Faunmediated\u001Fbn\u001F2rdamedia\u001E \u001Favolume\u001Fbnc\u001F2rdacarrier\u001E1 \u001FaChicot, Marcos\u001Eff\u001Fi7e1ea9dd-f65d-4758-a738-fa1d61365267\u001E\u001D"; private static final String RAW_EDIFACT_RECORD_PATH = "src/test/resources/records/edifact/565751us20210122.edi"; private static final String JOB_PROFILE_PATH = "/jobProfile"; @@ -162,11 +160,11 @@ public void shouldNotFillInInstanceIdAndInstanceHridWhenRecordContains999FieldWi kafkaCluster.send(request); // then - Event obtainedEvent = checkEventWithTypeSent(DI_RAW_RECORDS_CHUNK_PARSED); - RecordCollection recordCollection = Json.decodeValue(obtainedEvent.getEventPayload(), RecordCollection.class); - assertEquals(1, recordCollection.getRecords().size()); - Record record = recordCollection.getRecords().get(0); - assertNull(record.getExternalIdsHolder()); + Event obtainedEvent = checkEventWithTypeSent(DI_ERROR); + DataImportEventPayload eventPayload = Json.decodeValue(obtainedEvent.getEventPayload(), DataImportEventPayload.class); + assertEquals("A new Instance was not created because the incoming record already contained a 999ff$s or 999ff$i field", + new JsonObject(eventPayload.getContext().get("ERROR")).getString("error")); + assertNull(new JsonObject(eventPayload.getContext().get("MARC_BIBLIOGRAPHIC")).getString("externalIdsHolder")); } @Test @@ -182,11 +180,10 @@ public void shouldParseAndPublishChunkWithEdifactRecord() throws InterruptedExce kafkaCluster.send(request); // then - Event obtainedEvent = checkEventWithTypeSent(DI_RAW_RECORDS_CHUNK_PARSED); - RecordCollection recordCollection = Json.decodeValue(obtainedEvent.getEventPayload(), RecordCollection.class); - assertEquals(1, recordCollection.getRecords().size()); - Record record = recordCollection.getRecords().get(0); - assertEquals(EDIFACT, record.getRecordType()); + Event obtainedEvent = checkEventWithTypeSent(DI_INCOMING_EDIFACT_RECORD_PARSED); + DataImportEventPayload eventPayload = Json.decodeValue(obtainedEvent.getEventPayload(), DataImportEventPayload.class); + JsonObject record = new JsonObject(eventPayload.getContext().get("EDIFACT_INVOICE")); + assertEquals(EDIFACT, Record.RecordType.valueOf(record.getString("recordType"))); } @Test @@ -212,11 +209,10 @@ public void shouldCreateErrorRecordsWhenRecordNotParsed() throws InterruptedExce kafkaCluster.send(request); // then - Event obtainedEvent = checkEventWithTypeSent(DI_RAW_RECORDS_CHUNK_PARSED); - RecordCollection recordCollection = Json.decodeValue(obtainedEvent.getEventPayload(), RecordCollection.class); - assertEquals(1, recordCollection.getRecords().size()); - ErrorRecord errorRecord = 
recordCollection.getRecords().get(0).getErrorRecord();
-    assertTrue(errorRecord.getDescription().contains("org.marc4j.MarcException"));
+    Event obtainedEvent = checkEventWithTypeSent(DI_ERROR);
+    DataImportEventPayload eventPayload = Json.decodeValue(obtainedEvent.getEventPayload(), DataImportEventPayload.class);
+    JsonObject error = new JsonObject(eventPayload.getContext().get("ERROR"));
+    assertTrue(error.getString("errors").contains("org.marc4j.MarcException"));
   }
 
   @Test
@@ -281,7 +277,7 @@ public void shouldNotObserveValuesWhenJobExecutionIdNotCreated() throws Interrup
 
   @Test
   public void shouldNotSendAnyEventsForDuplicates() throws InterruptedException {
     // given
-    RawRecordsDto chunk = getChunk(RecordsMetadata.ContentType.MARC_RAW, RAW_RECORD_WITH_999_ff_field);
+    RawRecordsDto chunk = getChunk(RecordsMetadata.ContentType.MARC_RAW, CORRECT_RAW_RECORD);
     JobExecutionSourceChunkDao jobExecutionSourceChunkDao = getBeanFromSpringContext(vertx, org.folio.dao.JobExecutionSourceChunkDao.class);
     jobExecutionSourceChunkDao.save(new JobExecutionSourceChunk()
       .withId(chunk.getId())
@@ -301,7 +297,7 @@ public void shouldNotSendAnyEventsForDuplicates() throws InterruptedException {
 
   @Test
   public void shouldNotSendDIErrorWhenReceiveDuplicateEvent() throws InterruptedException {
     // given
-    RawRecordsDto chunk = getChunk(RecordsMetadata.ContentType.MARC_RAW, RAW_RECORD_WITH_999_ff_field);
+    RawRecordsDto chunk = getChunk(RecordsMetadata.ContentType.MARC_RAW, CORRECT_RAW_RECORD);
     SendKeyValues request = prepareWithSpecifiedEventPayload(JobProfileInfo.DataType.MARC, Json.encode(chunk));
     String jobExecutionId = getJobExecutionId(request);
@@ -310,7 +306,7 @@ public void shouldNotSendDIErrorWhenReceiveDuplicateEx
     kafkaCluster.send(request);
 
     // then
-    checkEventWithTypeSent(DI_RAW_RECORDS_CHUNK_PARSED);
+    checkEventWithTypeSent(DI_INCOMING_MARC_BIB_RECORD_PARSED);
     checkEventWithTypeWasNotSend(jobExecutionId, DI_ERROR);
   }
 
diff --git a/mod-source-record-manager-server/src/test/java/org/folio/verticle/consumers/StoredRecordChunkConsumersVerticleTest.java b/mod-source-record-manager-server/src/test/java/org/folio/verticle/consumers/StoredRecordChunkConsumersVerticleTest.java
index fcee45e72..5c6d652b0 100644
--- a/mod-source-record-manager-server/src/test/java/org/folio/verticle/consumers/StoredRecordChunkConsumersVerticleTest.java
+++ b/mod-source-record-manager-server/src/test/java/org/folio/verticle/consumers/StoredRecordChunkConsumersVerticleTest.java
@@ -41,7 +41,7 @@ import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_ERROR;
 import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_MARC_BIB_FOR_ORDER_CREATED;
 import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_PARSED_RECORDS_CHUNK_SAVED;
-import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_SRS_MARC_BIB_RECORD_CREATED;
+import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_INCOMING_MARC_BIB_RECORD_PARSED;
 import static org.folio.rest.jaxrs.model.Record.RecordType.MARC_BIB;
 import static org.folio.rest.util.OkapiConnectionParams.OKAPI_TENANT_HEADER;
 import static org.folio.rest.util.OkapiConnectionParams.OKAPI_TOKEN_HEADER;
@@ -126,7 +126,7 @@ public void shouldPublishCoupleOfSuccessEventsAndCoupleOfDiErrorEvents() throws
     kafkaCluster.send(request);
 
     // then
-    List successValues = observeValuesAndFilterByLeader("00116nam 22000731a 4700", DI_SRS_MARC_BIB_RECORD_CREATED, 3);
+    List successValues = observeValuesAndFilterByLeader("00116nam 22000731a 4700", DI_INCOMING_MARC_BIB_RECORD_PARSED, 3);
     assertEquals(3, successValues.size());
 
     List diErrorValues = observeValuesAndFilterByLeader("13113c7m a2200553Ii 4800", DI_ERROR, 7);
@@ -146,10 +146,10 @@ public void shouldSendEventsWithRecords() throws InterruptedException {
     kafkaCluster.send(request);
 
     // then
-    List observedValues = observeValuesAndFilterByLeader("00115nam 22000731a 4500", DI_SRS_MARC_BIB_RECORD_CREATED, 1);
+    List observedValues = observeValuesAndFilterByLeader("00115nam 22000731a 4500", DI_INCOMING_MARC_BIB_RECORD_PARSED, 1);
     Event obtainedEvent = Json.decodeValue(observedValues.get(0), Event.class);
     DataImportEventPayload eventPayload = Json.decodeValue(obtainedEvent.getEventPayload(), DataImportEventPayload.class);
-    assertEquals(DI_SRS_MARC_BIB_RECORD_CREATED.value(), eventPayload.getEventType());
+    assertEquals(DI_INCOMING_MARC_BIB_RECORD_PARSED.value(), eventPayload.getEventType());
     assertEquals(TENANT_ID, eventPayload.getTenant());
     assertNotNull(eventPayload.getContext().get(EntityType.MARC_BIBLIOGRAPHIC.value()));
     assertNotNull(eventPayload.getContext().get(JOB_PROFILE_SNAPSHOT_ID));
@@ -169,10 +169,10 @@ public void shouldObserveOnlySingleEventInCaseOfDuplicates() throws InterruptedE
     kafkaCluster.send(request);
 
     // then
-    List observedValues = observeValuesAndFilterByLeader("00115nam 22000731a 4500", DI_SRS_MARC_BIB_RECORD_CREATED, 1);
+    List observedValues = observeValuesAndFilterByLeader("00115nam 22000731a 4500", DI_INCOMING_MARC_BIB_RECORD_PARSED, 1);
     Event obtainedEvent = Json.decodeValue(observedValues.get(0), Event.class);
     DataImportEventPayload eventPayload = Json.decodeValue(obtainedEvent.getEventPayload(), DataImportEventPayload.class);
-    assertEquals(DI_SRS_MARC_BIB_RECORD_CREATED.value(), eventPayload.getEventType());
+    assertEquals(DI_INCOMING_MARC_BIB_RECORD_PARSED.value(), eventPayload.getEventType());
   }
 
   @Test
diff --git a/mod-source-record-manager-server/src/test/java/org/folio/verticle/consumers/util/MarcImportEventsHandlerTest.java b/mod-source-record-manager-server/src/test/java/org/folio/verticle/consumers/util/MarcImportEventsHandlerTest.java
index ce7339fc6..3ea2463a9 100644
--- a/mod-source-record-manager-server/src/test/java/org/folio/verticle/consumers/util/MarcImportEventsHandlerTest.java
+++ b/mod-source-record-manager-server/src/test/java/org/folio/verticle/consumers/util/MarcImportEventsHandlerTest.java
@@ -11,7 +11,7 @@ import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_ORDER_CREATED_READY_FOR_POST_PROCESSING;
 import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_PENDING_ORDER_CREATED;
 import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_SRS_MARC_AUTHORITY_RECORD_CREATED;
-import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_SRS_MARC_BIB_RECORD_CREATED;
+import static org.folio.rest.jaxrs.model.DataImportEventTypes.DI_INCOMING_MARC_BIB_RECORD_PARSED;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.mockito.ArgumentMatchers.any;
@@ -300,7 +300,7 @@ var record = new Record()
     payloadContext.put("HOLDINGS","[{\"instanceId\":\"946c4945-b711-4e67-bfb9-83fa30be6332\",\"hrid\":\"ho001\",\"id\":\"946c4945-b711-4e67-bfb9-83fa37be6312\"},{\"instanceId\":\"946c4945-b711-4e67-bfb9-83fa30be6331\",\"hrid\":\"ho002\",\"id\":\"946c4945-b111-4e67-bfb9-83fa30be6312\"}]");
     payloadContext.put("NOT_MATCHED_NUMBER","3");
     return new DataImportEventPayload()
-      .withEventsChain(List.of(DI_SRS_MARC_BIB_RECORD_CREATED.value()))
+      .withEventsChain(List.of(DI_INCOMING_MARC_BIB_RECORD_PARSED.value()))
       .withEventType(DI_INVENTORY_HOLDING_MATCHED.value())
       .withContext(payloadContext);
   }
@@ -315,7 +315,7 @@ var record = new Record()
     payloadContext.put("ITEM","[{\"holdingsRecordId\":\"946c4945-b711-4e67-bfb9-83fa30be633c\",\"hrid\":\"it001\",\"id\":\"946c4945-b711-4e67-bfb9-83fa30be4312\"},{\"holdingsRecordId\":\"946c4945-b711-4e67-bfb9-83fa30be633b\",\"hrid\":\"it002\",\"id\":\"946c4945-b711-4e67-bfb9-83fa30be6312\"}]");
     payloadContext.put("NOT_MATCHED_NUMBER","5");
     return new DataImportEventPayload()
-      .withEventsChain(List.of(DI_SRS_MARC_BIB_RECORD_CREATED.value()))
+      .withEventsChain(List.of(DI_INCOMING_MARC_BIB_RECORD_PARSED.value()))
       .withEventType(DI_INVENTORY_ITEM_MATCHED.value())
       .withContext(payloadContext);
   }
diff --git a/ramls/change-manager.raml b/ramls/change-manager.raml
index f9ad9df07..fdd3c292e 100644
--- a/ramls/change-manager.raml
+++ b/ramls/change-manager.raml
@@ -33,6 +33,7 @@ types:
   sourceRecordState: !include sourceRecordState.json
   deleteJobExecutionsReq: !include raml-storage/schemas/mod-source-record-manager/deleteJobExecutionsReq.json
   deleteJobExecutionsResp: !include raml-storage/schemas/mod-source-record-manager/deleteJobExecutionsResp.json
+  incomingRecord: !include raml-storage/schemas/mod-source-record-manager/incomingRecord.json
 
 traits:
   validate: !include raml-storage/raml-util/traits/validation.raml
diff --git a/ramls/metadata-provider.raml b/ramls/metadata-provider.raml
index 370ab6500..2088a9fe3 100644
--- a/ramls/metadata-provider.raml
+++ b/ramls/metadata-provider.raml
@@ -17,12 +17,12 @@ types:
   errors: !include raml-storage/raml-util/schemas/errors.schema
   jobExecutionSourceChunk: !include raml-storage/schemas/mod-source-record-manager/jobExecutionSourceChunk.json
   journalRecordCollection: !include raml-storage/schemas/mod-source-record-manager/journalRecordCollection.json
-  jobLogEntryDto: !include raml-storage/schemas/dto/jobLogEntryDto.json
-  jobLogEntryDtoCollection: !include raml-storage/schemas/dto/jobLogEntryDtoCollection.json
   recordProcessingLogDto: !include raml-storage/schemas/dto/recordProcessingLogDto.json
+  recordProcessingLogDtoCollection: !include raml-storage/schemas/dto/recordProcessingLogDtoCollection.json
   jobExecutionSummaryDto: !include raml-storage/schemas/dto/jobExecutionSummaryDto.json
   jobProfileInfoCollection: !include raml-storage/schemas/common/profileInfoCollection.json
   jobExecutionUserInfoCollection: !include raml-storage/schemas/dto/jobExecutionUserInfoCollection.json
+  incomingRecord: !include raml-storage/schemas/mod-source-record-manager/incomingRecord.json
 
 traits:
   validate: !include raml-storage/raml-util/traits/validation.raml
@@ -176,7 +176,7 @@ resourceTypes:
             200:
               body:
                 application/json:
-                  type: jobLogEntryDtoCollection
+                  type: recordProcessingLogDtoCollection
             400:
               description: "Bad request"
               body:
@@ -242,4 +242,17 @@ resourceTypes:
               body:
                 text/plain:
                   example: "Internal server error"
+  /incomingRecords/{recordId}:
+    get:
+      description: get incoming record by id
+      responses:
+        200:
+          body:
+            application/json:
+              type: incomingRecord
+        404:
+          description: "Not found"
+          body:
+            text/plain:
+              example: "Not found"
diff --git a/ramls/raml-storage b/ramls/raml-storage
index 52bb24f20..3f6af3f26 160000
--- a/ramls/raml-storage
+++ b/ramls/raml-storage
@@ -1 +1 @@
-Subproject commit 52bb24f20ff1c34d76eaf509d81fec96e794cc9f
+Subproject commit 3f6af3f264dfa984338afe128412f0e20d02e361
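Note on the new API surface: the /incomingRecords/{recordId} resource declared in
metadata-provider.raml above is what callers use to fetch the stored incoming record
for a DI log. Below is a minimal, hypothetical client-side sketch using only
java.net.http from the JDK; the Okapi URL, tenant, token, and record UUID are
placeholder values, and the /metadata-provider base path is assumed from FOLIO
convention rather than stated in this hunk.

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    // Hypothetical lookup of an incoming record by id via
    // GET /metadata-provider/incomingRecords/{recordId}.
    public class IncomingRecordLookup {
      public static void main(String[] args) throws Exception {
        String recordId = "946c4945-b711-4e67-bfb9-83fa30be6332"; // placeholder UUID
        HttpRequest request = HttpRequest.newBuilder()
          .uri(URI.create("http://localhost:9130/metadata-provider/incomingRecords/" + recordId))
          .header("X-Okapi-Tenant", "diku")   // placeholder tenant
          .header("X-Okapi-Token", "token")   // placeholder token
          .header("Accept", "application/json, text/plain")
          .GET()
          .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
          .send(request, HttpResponse.BodyHandlers.ofString());
        // Per the RAML above: 200 returns the incomingRecord JSON, 404 a plain-text message.
        System.out.println(response.statusCode() + ": " + response.body());
      }
    }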
From 71d12544e72d8c24812cf2adee15d3ec1ce09b48 Mon Sep 17 00:00:00 2001
From: Viacheslav Kolesnyk <94473337+viacheslavkol@users.noreply.github.com>
Date: Mon, 22 Jan 2024 10:33:45 +0100
Subject: [PATCH 2/2] MODSOURMAN-1116 Accommodate for authority-source-files api optimistic locking changes (#843)

---
 descriptors/ModuleDescriptor-template.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/descriptors/ModuleDescriptor-template.json b/descriptors/ModuleDescriptor-template.json
index 87208d025..c7fcf77e3 100644
--- a/descriptors/ModuleDescriptor-template.json
+++ b/descriptors/ModuleDescriptor-template.json
@@ -646,7 +646,7 @@
     },
     {
       "id": "authority-source-files",
-      "version": "2.1"
+      "version": "2.2"
     }
   ],
   "permissionSets": [