From 5f43cc4a13cd1c5c65bfe80159ddee39a6eccbeb Mon Sep 17 00:00:00 2001 From: Mukhiddin Yusupov Date: Fri, 25 Oct 2024 12:03:17 +0500 Subject: [PATCH 01/19] modelinks-115: disable concurrent links event processing --- .../verticle/consumers/AuthorityLinkChunkConsumersVerticle.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mod-source-record-storage-server/src/main/java/org/folio/verticle/consumers/AuthorityLinkChunkConsumersVerticle.java b/mod-source-record-storage-server/src/main/java/org/folio/verticle/consumers/AuthorityLinkChunkConsumersVerticle.java index 8094b3841..643d50666 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/verticle/consumers/AuthorityLinkChunkConsumersVerticle.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/verticle/consumers/AuthorityLinkChunkConsumersVerticle.java @@ -19,7 +19,7 @@ public class AuthorityLinkChunkConsumersVerticle extends AbstractConsumerVerticl private final AuthorityLinkChunkKafkaHandler kafkaHandler; - @Value("${srs.kafka.AuthorityLinkChunkConsumer.loadLimit:2}") + @Value("${srs.kafka.AuthorityLinkChunkConsumer.loadLimit:1}") private int loadLimit; @Autowired From ad1d0a08f0647b8c5f2398a0229cf0c4e528e51f Mon Sep 17 00:00:00 2001 From: Mukhiddin Yusupov Date: Wed, 30 Oct 2024 19:06:16 +0500 Subject: [PATCH 02/19] modelinks-115: test --- mod-source-record-storage-server/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mod-source-record-storage-server/pom.xml b/mod-source-record-storage-server/pom.xml index 3b4babe9f..7bc31a7a1 100644 --- a/mod-source-record-storage-server/pom.xml +++ b/mod-source-record-storage-server/pom.xml @@ -182,7 +182,7 @@ org.folio data-import-processing-core - 4.3.0-SNAPSHOT + 4.3.0 io.vertx From 8a5a6dd52d227c72e81ce2db11ca1e55202ed77f Mon Sep 17 00:00:00 2001 From: Mukhiddin Yusupov Date: Thu, 31 Oct 2024 13:40:06 +0500 Subject: [PATCH 03/19] modelinks-115: force completing the authority link event handling --- .../org/folio/consumers/AuthorityLinkChunkKafkaHandler.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java b/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java index 345f9680a..a0cbb9159 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java @@ -89,7 +89,7 @@ public AuthorityLinkChunkKafkaHandler(RecordService recordService, KafkaConfig k public Future handle(KafkaConsumerRecord consumerRecord) { LOGGER.trace("handle:: Handling kafka record: {}", consumerRecord); var userId = extractHeaderValue(XOkapiHeaders.USER_ID, consumerRecord.headers()); - return mapToEvent(consumerRecord) + var result = mapToEvent(consumerRecord) .compose(this::createSnapshot) .compose(event -> retrieveRecords(event, event.getTenant()) .compose(recordCollection -> mapRecordFieldsChanges(event, recordCollection, userId)) @@ -102,7 +102,8 @@ public Future handle(KafkaConsumerRecord consumerRecord) LOGGER.error("Failed to handle {} event", MARC_BIB.moduleTopicName(), th); return Future.failedFuture(th); } - ); + ).result(); + return Future.succeededFuture(result); } private Future mapToEvent(KafkaConsumerRecord consumerRecord) { From 3529099eff0240b792784aeef26ce87b5e3eb0cf Mon Sep 17 00:00:00 2001 From: Mukhiddin 
Yusupov Date: Fri, 1 Nov 2024 15:29:19 +0500 Subject: [PATCH 04/19] modsource-817: debug with logs --- .../AuthorityLinkChunkKafkaHandler.java | 25 ++++---- .../java/org/folio/dao/RecordDaoImpl.java | 57 ++++++++++--------- 2 files changed, 44 insertions(+), 38 deletions(-) diff --git a/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java b/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java index a0cbb9159..45663747c 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java @@ -87,7 +87,7 @@ public AuthorityLinkChunkKafkaHandler(RecordService recordService, KafkaConfig k @Override public Future handle(KafkaConsumerRecord consumerRecord) { - LOGGER.trace("handle:: Handling kafka record: {}", consumerRecord); + LOGGER.info("handle:: START Handling kafka record: {}", consumerRecord); var userId = extractHeaderValue(XOkapiHeaders.USER_ID, consumerRecord.headers()); var result = mapToEvent(consumerRecord) .compose(this::createSnapshot) @@ -103,6 +103,7 @@ public Future handle(KafkaConsumerRecord consumerRecord) return Future.failedFuture(th); } ).result(); + LOGGER.info("handle:: END Handling kafka record"); return Future.succeededFuture(result); } @@ -119,13 +120,13 @@ private Future mapToEvent(KafkaConsumerRecord retrieveRecords(BibAuthorityLinksUpdate bibAuthorityLinksUpdate, String tenantId) { - LOGGER.trace("Retrieving bibs for jobId {}, authorityId {}", + LOGGER.info("Retrieving bibs for jobId {}, authorityId {}", bibAuthorityLinksUpdate.getJobId(), bibAuthorityLinksUpdate.getAuthorityId()); var instanceIds = bibAuthorityLinksUpdate.getUpdateTargets().stream() .flatMap(updateTarget -> updateTarget.getLinks().stream() .map(Link::getInstanceId)) .distinct() - .collect(Collectors.toList()); + .toList(); var condition = RecordDaoUtil.getExternalIdsCondition(instanceIds, IdType.INSTANCE) .and(RecordDaoUtil.filterRecordByDeleted(false)); @@ -134,7 +135,7 @@ private Future retrieveRecords(BibAuthorityLinksUpdate bibAuth private Future mapRecordFieldsChanges(BibAuthorityLinksUpdate bibAuthorityLinksUpdate, RecordCollection recordCollection, String userId) { - LOGGER.debug("Retrieved {} bib records for jobId {}, authorityId {}", + LOGGER.info("Retrieved {} bib records for jobId {}, authorityId {}", recordCollection.getTotalRecords(), bibAuthorityLinksUpdate.getJobId(), bibAuthorityLinksUpdate.getAuthorityId()); return getLinkProcessorForEvent(bibAuthorityLinksUpdate).map(linkProcessor -> { @@ -195,17 +196,17 @@ private Future mapRecordFieldsChanges(BibAuthorityLinksUpdate private Future getLinkProcessorForEvent(BibAuthorityLinksUpdate bibAuthorityLinksUpdate) { var eventType = bibAuthorityLinksUpdate.getType(); switch (eventType) { - case DELETE: { + case DELETE -> { LOGGER.debug("Precessing DELETE event for jobId {}, authorityId {}", bibAuthorityLinksUpdate.getJobId(), bibAuthorityLinksUpdate.getAuthorityId()); return Future.succeededFuture(new DeleteLinkProcessor()); } - case UPDATE: { + case UPDATE -> { LOGGER.debug("Precessing UPDATE event for jobId {}, authorityId {}", bibAuthorityLinksUpdate.getJobId(), bibAuthorityLinksUpdate.getAuthorityId()); return Future.succeededFuture(new UpdateLinkProcessor()); } - default: { + default -> { return Future.failedFuture(new IllegalArgumentException( String.format("Unsupported event type: 
%s for jobId %s, authorityId %s", eventType, bibAuthorityLinksUpdate.getJobId(), bibAuthorityLinksUpdate.getAuthorityId()))); @@ -219,7 +220,7 @@ private List extractUpdateTargetFieldCodesForInstance(BibAuthorityLinksU .filter(updateTarget -> updateTarget.getLinks().stream() .anyMatch(link -> link.getInstanceId().equals(instanceId))) .map(UpdateTarget::getField) - .collect(Collectors.toList()); + .toList(); } private Optional getAuthorityIdSubfield(List subfields) { @@ -230,7 +231,7 @@ private Optional getAuthorityIdSubfield(List subfields) { private List mapRecordsToBibUpdateEvents(RecordsBatchResponse batchResponse, BibAuthorityLinksUpdate event) { - LOGGER.debug("Updated {} bibs for jobId {}, authorityId {}", + LOGGER.info("Updated {} bibs for jobId {}, authorityId {}", batchResponse.getTotalRecords(), event.getJobId(), event.getAuthorityId()); var errors = batchResponse.getErrorMessages(); @@ -261,7 +262,7 @@ private List toMarcBibUpdateEvents(RecordsBatchResponse batchResp .withTs(bibAuthorityLinksUpdate.getTs()) .withRecord(bibRecord); }) - .collect(Collectors.toList()); + .toList(); } private List toFailedLinkUpdateReports(List errorRecords, @@ -282,7 +283,7 @@ private List toFailedLinkUpdateReports(List errorRecor .withFailCause(bibRecord.getErrorRecord().getDescription()) .withStatus(FAIL); }) - .collect(Collectors.toList()); + .toList(); } private Future createSnapshot(BibAuthorityLinksUpdate bibAuthorityLinksUpdate) { @@ -377,7 +378,7 @@ private KafkaProducerRecord createKafkaProducerRecord(KafkaTopic private List getErrorRecords(RecordsBatchResponse batchResponse) { return batchResponse.getRecords().stream() .filter(marcRecord -> nonNull(marcRecord.getErrorRecord())) - .collect(Collectors.toList()); + .toList(); } } diff --git a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java index 5fe8b270d..fc9aa5790 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java @@ -46,7 +46,6 @@ import io.vertx.core.Promise; import io.vertx.core.Vertx; import io.vertx.reactivex.pgclient.PgPool; -import io.vertx.reactivex.sqlclient.SqlConnection; import io.vertx.sqlclient.Row; import java.sql.Connection; import java.sql.SQLException; @@ -63,7 +62,6 @@ import java.util.Set; import java.util.UUID; import java.util.function.Function; -import java.util.stream.Collectors; import javax.ws.rs.BadRequestException; import javax.ws.rs.NotFoundException; import net.sf.jsqlparser.JSQLParserException; @@ -377,7 +375,11 @@ public Future> getMatchedRecordsWithoutIndexersVersionUsage(MatchFi ) .offset(offset) .limit(limit > 0 ? 
limit : DEFAULT_LIMIT_FOR_GET_RECORDS) - )).map(queryResult -> queryResult.stream().map(res -> asRow(res.unwrap())).map(this::toRecord).collect(Collectors.toList())); + )).map(queryResult -> queryResult.stream() + .map(res -> asRow(res.unwrap())) + .map(this::toRecord) + .toList() + ); } private Condition getMatchedFieldCondition(MatchField matchedField, Filter.ComparisonPartType comparisonPartType, String partition) { @@ -445,7 +447,8 @@ private String getValueInSqlFormat(Value value) { } if (Value.ValueType.LIST.equals(value.getType())) { List listOfValues = ((ListValue) value).getValue().stream() - .map(v -> format(VALUE_IN_SINGLE_QUOTES, v)).collect(Collectors.toList()); + .map(v -> format(VALUE_IN_SINGLE_QUOTES, v)) + .toList(); return StringUtils.join(listOfValues, ", "); } return StringUtils.EMPTY; @@ -630,13 +633,6 @@ private void appendWhere(SelectJoinStep step, ParseLeaderResult parseLeaderResul .and(RECORDS_LB.EXTERNAL_ID.isNotNull()); } - private Flowable streamMarcRecordIdsWithoutIndexersVersionUsage(SqlConnection conn, ParseLeaderResult parseLeaderResult, - ParseFieldsResult parseFieldsResult, - RecordSearchParameters searchParameters) { - return conn.rxPrepare(getAlternativeQuery(parseLeaderResult, parseFieldsResult, searchParameters)) - .flatMapPublisher(pq -> pq.createStream(10000).toFlowable()); - } - private String getAlternativeQuery(ParseLeaderResult parseLeaderResult, ParseFieldsResult parseFieldsResult, RecordSearchParameters searchParameters) { SelectJoinStep> searchQuery = selectDistinct(RECORDS_LB.EXTERNAL_ID).from(RECORDS_LB); appendJoinAlternative(searchQuery, parseLeaderResult, parseFieldsResult); @@ -906,6 +902,10 @@ public Future saveRecords(RecordCollection recordCollectio matchedGenerations.put(matchedId, generation); }); + LOG.info("saveRecords:: matched ids: {}", matchedIds); + LOG.info("saveRecords:: records ids: {}", ids); + LOG.info("saveRecords:: generations: {}", matchedGenerations); + // update matching records state if(!ids.isEmpty()) { @@ -921,15 +921,17 @@ public Future saveRecords(RecordCollection recordCollectio .bulkAfter(500) .commitAfter(1000) .onErrorAbort() - .loadRecords(dbRecords.stream().map(record -> { - Integer generation = matchedGenerations.get(record.getMatchedId()); - if (Objects.nonNull(generation)) { - record.setGeneration(generation + 1); - } else if (Objects.isNull(record.getGeneration())) { - record.setGeneration(0); - } - return record; - }).collect(Collectors.toList())) + .loadRecords(dbRecords.stream() + .map(record -> { + Integer generation = matchedGenerations.get(record.getMatchedId()); + if (Objects.nonNull(generation)) { + record.setGeneration(generation + 1); + } else if (Objects.isNull(record.getGeneration())) { + record.setGeneration(0); + } + return record; + }) + .toList()) .fieldsCorresponding() .execute() .errors(); @@ -987,7 +989,7 @@ public Future saveRecords(RecordCollection recordCollectio promise.fail(e.getCause()); } }, - false, + true, r -> { if (r.failed()) { LOG.warn("saveRecords:: Error during batch record save", r.cause()); @@ -1273,7 +1275,7 @@ public Future updateParsedRecords(RecordCollection r blockingPromise.complete(new ParsedRecordsBatchResponse() .withErrorMessages(errorMessages) - .withParsedRecords(recordsUpdated.stream().map(Record::getParsedRecord).collect(Collectors.toList())) + .withParsedRecords(recordsUpdated.stream().map(Record::getParsedRecord).toList()) .withTotalRecords(recordsUpdated.size())); }); } catch (SQLException e) { @@ -1474,7 +1476,7 @@ public Future 
updateMarcAuthorityRecordsStateAsDeleted(String matchedId, S .compose(recordCollection -> { List> futures = recordCollection.getRecords().stream() .map(recordToUpdate -> updateMarcAuthorityRecordWithDeletedState(txQE, ensureRecordForeignKeys(recordToUpdate))) - .collect(Collectors.toList()); + .toList(); Promise result = Promise.promise(); GenericCompositeFuture.all(futures).onComplete(ar -> { @@ -1668,7 +1670,8 @@ private RecordCollection toRecordCollection(QueryResult result) { List records = result.stream().map(res -> asRow(res.unwrap())).map(row -> { recordCollection.setTotalRecords(row.getInteger(COUNT)); return toRecord(row); - }).collect(Collectors.toList()); + }) + .toList(); if (!records.isEmpty() && Objects.nonNull(records.get(0).getId())) { recordCollection.withRecords(records); } @@ -1680,7 +1683,8 @@ private StrippedParsedRecordCollection toStrippedParsedRecordCollection(QueryRes List records = result.stream().map(res -> asRow(res.unwrap())).map(row -> { recordCollection.setTotalRecords(row.getInteger(COUNT)); return toStrippedParsedRecord(row); - }).collect(Collectors.toList()); + }) + .toList(); if (!records.isEmpty() && Objects.nonNull(records.get(0).getId())) { recordCollection.withRecords(records); } @@ -1705,7 +1709,8 @@ private SourceRecordCollection toSourceRecordCollection(QueryResult result) { sourceRecordCollection.setTotalRecords(row.getInteger(COUNT)); return RecordDaoUtil.toSourceRecord(RecordDaoUtil.toRecord(row)) .withParsedRecord(ParsedRecordDaoUtil.toParsedRecord(row)); - }).collect(Collectors.toList()); + }) + .toList(); if (!sourceRecords.isEmpty() && Objects.nonNull(sourceRecords.get(0).getRecordId())) { sourceRecordCollection.withSourceRecords(sourceRecords); } From 51721e02a8cc45436cda8ec32375f53e09ba668f Mon Sep 17 00:00:00 2001 From: Mukhiddin Yusupov Date: Tue, 5 Nov 2024 13:37:10 +0500 Subject: [PATCH 05/19] modsource-817: add blocking get and update service method --- .../AuthorityLinkChunkKafkaHandler.java | 55 ++++++++++++++----- .../java/org/folio/dao/RecordDaoImpl.java | 20 +------ .../org/folio/services/RecordService.java | 23 ++++++++ .../org/folio/services/RecordServiceImpl.java | 34 ++++++++++++ .../AuthorityLinkChunkConsumersVerticle.java | 2 +- 5 files changed, 100 insertions(+), 34 deletions(-) diff --git a/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java b/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java index 45663747c..ac7585992 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java @@ -14,6 +14,7 @@ import static org.folio.services.util.KafkaUtil.extractHeaderValue; import io.vertx.core.Future; +import io.vertx.core.Handler; import io.vertx.core.Promise; import io.vertx.core.json.Json; import io.vertx.kafka.client.consumer.KafkaConsumerRecord; @@ -30,6 +31,7 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Function; import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; @@ -87,24 +89,49 @@ public AuthorityLinkChunkKafkaHandler(RecordService recordService, KafkaConfig k @Override public Future handle(KafkaConsumerRecord consumerRecord) { - LOGGER.info("handle:: START Handling 
kafka record: {}", consumerRecord); + LOGGER.info("handle:: Start Handling kafka record: {}", consumerRecord); var userId = extractHeaderValue(XOkapiHeaders.USER_ID, consumerRecord.headers()); + var result = mapToEvent(consumerRecord) .compose(this::createSnapshot) - .compose(event -> retrieveRecords(event, event.getTenant()) - .compose(recordCollection -> mapRecordFieldsChanges(event, recordCollection, userId)) - .compose(recordCollection -> recordService.saveRecords(recordCollection, - toOkapiHeaders(consumerRecord.headers(), event.getTenant()))) - .map(recordsBatchResponse -> sendReports(recordsBatchResponse, event, consumerRecord.headers())) - .map(recordsBatchResponse -> mapRecordsToBibUpdateEvents(recordsBatchResponse, event)) - .compose(marcBibUpdates -> sendEvents(marcBibUpdates, event, consumerRecord)) - ).recover(th -> { - LOGGER.error("Failed to handle {} event", MARC_BIB.moduleTopicName(), th); - return Future.failedFuture(th); - } - ).result(); + .compose(linksUpdate -> { + var instanceIds = linksUpdate.getUpdateTargets().stream() + .flatMap(updateTarget -> updateTarget.getLinks().stream() + .map(Link::getInstanceId)) + .distinct() + .toList(); + var condition = RecordDaoUtil.getExternalIdsCondition(instanceIds, IdType.INSTANCE) + .and(RecordDaoUtil.filterRecordByDeleted(false)); + var okapiHeaders = toOkapiHeaders(consumerRecord.headers(), linksUpdate.getTenant()); + Handler postUpdateHandler = recordsBatchResponse -> { + sendReports(recordsBatchResponse, linksUpdate, consumerRecord.headers()); + var marcBibUpdateStats = mapRecordsToBibUpdateEvents(recordsBatchResponse, linksUpdate); + sendEvents(marcBibUpdateStats, linksUpdate, consumerRecord); + }; + Function> recordsModifier = recordsCollection -> + this.mapRecordFieldsChanges(linksUpdate, recordsCollection, userId); + + return recordService.getAndUpdateRecordsBlocking(condition, RecordType.MARC_BIB, 0, instanceIds.size(), + recordsModifier, okapiHeaders, postUpdateHandler); + }) + .map(batchResponse -> consumerRecord.key()); + +// var result = mapToEvent(consumerRecord) +// .compose(this::createSnapshot) +// .compose(event -> retrieveRecords(event, event.getTenant()) +// .compose(recordCollection -> mapRecordFieldsChanges(event, recordCollection, userId)) +// .compose(recordCollection -> recordService.saveRecords(recordCollection, +// toOkapiHeaders(consumerRecord.headers(), event.getTenant()))) +// .map(recordsBatchResponse -> sendReports(recordsBatchResponse, event, consumerRecord.headers())) +// .map(recordsBatchResponse -> mapRecordsToBibUpdateEvents(recordsBatchResponse, event)) +// .compose(marcBibUpdates -> sendEvents(marcBibUpdates, event, consumerRecord)) +// ).recover(th -> { +// LOGGER.error("Failed to handle {} event", MARC_BIB.moduleTopicName(), th); +// return Future.failedFuture(th); +// } +// ).result(); LOGGER.info("handle:: END Handling kafka record"); - return Future.succeededFuture(result); + return result; } private Future mapToEvent(KafkaConsumerRecord consumerRecord) { diff --git a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java index fc9aa5790..7dbee7242 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java @@ -633,24 +633,6 @@ private void appendWhere(SelectJoinStep step, ParseLeaderResult parseLeaderResul .and(RECORDS_LB.EXTERNAL_ID.isNotNull()); } - private 
String getAlternativeQuery(ParseLeaderResult parseLeaderResult, ParseFieldsResult parseFieldsResult, RecordSearchParameters searchParameters) { - SelectJoinStep> searchQuery = selectDistinct(RECORDS_LB.EXTERNAL_ID).from(RECORDS_LB); - appendJoinAlternative(searchQuery, parseLeaderResult, parseFieldsResult); - appendWhere(searchQuery, parseLeaderResult, parseFieldsResult, searchParameters); - if (searchParameters.getOffset() != null) { - searchQuery.offset(searchParameters.getOffset()); - } - if (searchParameters.getLimit() != null) { - searchQuery.limit(searchParameters.getLimit()); - } - - SelectJoinStep> countQuery = DSL.select(countDistinct(RECORDS_LB.EXTERNAL_ID)).from(RECORDS_LB); - appendJoinAlternative(countQuery, parseLeaderResult, parseFieldsResult); - appendWhere(countQuery, parseLeaderResult, parseFieldsResult, searchParameters); - - return DSL.select().from(searchQuery).rightJoin(countQuery).on(DSL.trueCondition()).getSQL(ParamType.INLINED); - } - private void appendJoinAlternative(SelectJoinStep selectJoinStep, ParseLeaderResult parseLeaderResult, ParseFieldsResult parseFieldsResult) { if (parseLeaderResult.isEnabled()) { Table marcIndexersLeader = table(name("marc_indexers_leader")); @@ -989,7 +971,7 @@ public Future saveRecords(RecordCollection recordCollectio promise.fail(e.getCause()); } }, - true, + false, r -> { if (r.failed()) { LOG.warn("saveRecords:: Error during batch record save", r.cause()); diff --git a/mod-source-record-storage-server/src/main/java/org/folio/services/RecordService.java b/mod-source-record-storage-server/src/main/java/org/folio/services/RecordService.java index 14ded8a17..bdcba0b9b 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/services/RecordService.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/services/RecordService.java @@ -4,7 +4,9 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.function.Function; +import io.vertx.core.Handler; import io.vertx.sqlclient.Row; import org.folio.dao.util.IdType; import org.folio.dao.util.RecordType; @@ -268,5 +270,26 @@ public interface RecordService { */ Future updateRecordsState(String matchedId, RecordState state, RecordType recordType, String tenantId); + /** + * Searches for {@link Record} by {@link Condition} of {@link RecordType} with given offset and limit, applies the + * function to modify the found records and persist the updates. Once records persisting is complete it applies the + * provided post-update handler on operation's result. 
+ * + * @param condition query where condition + * @param recordType record type + * @param offset starting index in a list of results + * @param limit limit of records for pagination + * @param recordsModifier function to apply modifications on fetched records + * @param okapiHeaders okapi headers + * @param postUpdateHandler handler on the updated records + * @return future with response containing list of successfully updated records and error messages for records that + * were not updated + */ + Future getAndUpdateRecordsBlocking(Condition condition, RecordType recordType, + int offset, int limit, + Function> recordsModifier, + Map okapiHeaders, + Handler postUpdateHandler); + Future deleteRecordById(String id, IdType idType, Map okapiHeaders); } diff --git a/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java b/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java index 5b662d7ee..6af009584 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java @@ -23,8 +23,11 @@ import io.reactivex.Flowable; import io.vertx.core.AsyncResult; +import io.vertx.core.Context; import io.vertx.core.Future; +import io.vertx.core.Handler; import io.vertx.core.Promise; +import io.vertx.core.Vertx; import io.vertx.core.json.JsonArray; import io.vertx.core.json.JsonObject; import io.vertx.pgclient.PgException; @@ -39,6 +42,7 @@ import java.util.Objects; import java.util.Optional; import java.util.UUID; +import java.util.function.Function; import java.util.stream.Collectors; import javax.ws.rs.BadRequestException; import javax.ws.rs.NotFoundException; @@ -120,6 +124,36 @@ public Future getRecords(Condition condition, RecordType recor return recordDao.getRecords(condition, recordType, orderFields, offset, limit, tenantId); } + @Override + public Future getAndUpdateRecordsBlocking(Condition condition, RecordType recordType, + int offset, int limit, + Function> recordsModifier, + Map okapiHeaders, + Handler postUpdateHandler) { + var tenantId = okapiHeaders.get(OKAPI_TENANT_HEADER); + Promise promise = Promise.promise(); + Context context = Vertx.currentContext(); + if(context == null) { + return Future.failedFuture("getAndUpdateRecordsBlocking must be executed by a Vertx thread"); + } + + context.owner().executeBlocking(() -> + getRecords(condition, recordType, Collections.emptyList(), offset, limit, tenantId) + .compose(recordsModifier::apply) + .compose(recordsCollection -> saveRecords(recordsCollection, okapiHeaders)) + .onComplete(asyncResult -> { + if (asyncResult.failed()) { + promise.fail(asyncResult.cause()); + } else { + postUpdateHandler.handle(asyncResult.result()); + promise.complete(asyncResult.result()); + } + }) + ); + + return promise.future(); + } + @Override public Flowable streamRecords(Condition condition, RecordType recordType, Collection> orderFields, int offset, int limit, String tenantId) { return recordDao.streamRecords(condition, recordType, orderFields, offset, limit, tenantId); diff --git a/mod-source-record-storage-server/src/main/java/org/folio/verticle/consumers/AuthorityLinkChunkConsumersVerticle.java b/mod-source-record-storage-server/src/main/java/org/folio/verticle/consumers/AuthorityLinkChunkConsumersVerticle.java index 643d50666..8094b3841 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/verticle/consumers/AuthorityLinkChunkConsumersVerticle.java 
+++ b/mod-source-record-storage-server/src/main/java/org/folio/verticle/consumers/AuthorityLinkChunkConsumersVerticle.java @@ -19,7 +19,7 @@ public class AuthorityLinkChunkConsumersVerticle extends AbstractConsumerVerticl private final AuthorityLinkChunkKafkaHandler kafkaHandler; - @Value("${srs.kafka.AuthorityLinkChunkConsumer.loadLimit:1}") + @Value("${srs.kafka.AuthorityLinkChunkConsumer.loadLimit:2}") private int loadLimit; @Autowired From 58675c231139cf9392b25ff251fb29c46006dbf5 Mon Sep 17 00:00:00 2001 From: Mukhiddin Yusupov Date: Tue, 5 Nov 2024 17:54:20 +0500 Subject: [PATCH 06/19] fix(marc-bib-update): Fix data consistency in handling and updating Marc Bib records for links.instance-authority event - make the handling of events for instance-authority links so that no Marc Bib modifications from are lost due to concurrent nature of handling events - make sure the event of the first in a sequence of updates sent by the producer and then received by the consumer is handled before with persisting the changes of related bibs than the events produced/consumed later Closes: MODSOURCE-817 --- .../AuthorityLinkChunkKafkaHandler.java | 39 +++---------------- .../java/org/folio/dao/RecordDaoImpl.java | 4 -- 2 files changed, 5 insertions(+), 38 deletions(-) diff --git a/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java b/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java index ac7585992..a91afbf2b 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java @@ -1,6 +1,5 @@ package org.folio.consumers; -import static java.util.Collections.emptyList; import static java.util.Objects.nonNull; import static org.apache.commons.collections4.CollectionUtils.isEmpty; import static org.apache.commons.lang3.StringUtils.EMPTY; @@ -108,29 +107,15 @@ public Future handle(KafkaConsumerRecord consumerRecord) var marcBibUpdateStats = mapRecordsToBibUpdateEvents(recordsBatchResponse, linksUpdate); sendEvents(marcBibUpdateStats, linksUpdate, consumerRecord); }; - Function> recordsModifier = recordsCollection -> - this.mapRecordFieldsChanges(linksUpdate, recordsCollection, userId); + Function> recordsModifier = + recordsCollection -> this.mapRecordFieldsChanges(linksUpdate, recordsCollection, userId); return recordService.getAndUpdateRecordsBlocking(condition, RecordType.MARC_BIB, 0, instanceIds.size(), recordsModifier, okapiHeaders, postUpdateHandler); }) .map(batchResponse -> consumerRecord.key()); -// var result = mapToEvent(consumerRecord) -// .compose(this::createSnapshot) -// .compose(event -> retrieveRecords(event, event.getTenant()) -// .compose(recordCollection -> mapRecordFieldsChanges(event, recordCollection, userId)) -// .compose(recordCollection -> recordService.saveRecords(recordCollection, -// toOkapiHeaders(consumerRecord.headers(), event.getTenant()))) -// .map(recordsBatchResponse -> sendReports(recordsBatchResponse, event, consumerRecord.headers())) -// .map(recordsBatchResponse -> mapRecordsToBibUpdateEvents(recordsBatchResponse, event)) -// .compose(marcBibUpdates -> sendEvents(marcBibUpdates, event, consumerRecord)) -// ).recover(th -> { -// LOGGER.error("Failed to handle {} event", MARC_BIB.moduleTopicName(), th); -// return Future.failedFuture(th); -// } -// ).result(); - LOGGER.info("handle:: END Handling kafka record"); + 
LOGGER.info("handle:: Finish Handling kafka record"); return result; } @@ -146,23 +131,9 @@ private Future mapToEvent(KafkaConsumerRecord retrieveRecords(BibAuthorityLinksUpdate bibAuthorityLinksUpdate, String tenantId) { - LOGGER.info("Retrieving bibs for jobId {}, authorityId {}", - bibAuthorityLinksUpdate.getJobId(), bibAuthorityLinksUpdate.getAuthorityId()); - var instanceIds = bibAuthorityLinksUpdate.getUpdateTargets().stream() - .flatMap(updateTarget -> updateTarget.getLinks().stream() - .map(Link::getInstanceId)) - .distinct() - .toList(); - - var condition = RecordDaoUtil.getExternalIdsCondition(instanceIds, IdType.INSTANCE) - .and(RecordDaoUtil.filterRecordByDeleted(false)); - return recordService.getRecords(condition, RecordType.MARC_BIB, emptyList(), 0, instanceIds.size(), tenantId); - } - private Future mapRecordFieldsChanges(BibAuthorityLinksUpdate bibAuthorityLinksUpdate, RecordCollection recordCollection, String userId) { - LOGGER.info("Retrieved {} bib records for jobId {}, authorityId {}", + LOGGER.debug("Retrieved {} bib records for jobId {}, authorityId {}", recordCollection.getTotalRecords(), bibAuthorityLinksUpdate.getJobId(), bibAuthorityLinksUpdate.getAuthorityId()); return getLinkProcessorForEvent(bibAuthorityLinksUpdate).map(linkProcessor -> { @@ -258,7 +229,7 @@ private Optional getAuthorityIdSubfield(List subfields) { private List mapRecordsToBibUpdateEvents(RecordsBatchResponse batchResponse, BibAuthorityLinksUpdate event) { - LOGGER.info("Updated {} bibs for jobId {}, authorityId {}", + LOGGER.debug("Updated {} bibs for jobId {}, authorityId {}", batchResponse.getTotalRecords(), event.getJobId(), event.getAuthorityId()); var errors = batchResponse.getErrorMessages(); diff --git a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java index 7dbee7242..99e7aaa04 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java @@ -884,10 +884,6 @@ public Future saveRecords(RecordCollection recordCollectio matchedGenerations.put(matchedId, generation); }); - LOG.info("saveRecords:: matched ids: {}", matchedIds); - LOG.info("saveRecords:: records ids: {}", ids); - LOG.info("saveRecords:: generations: {}", matchedGenerations); - // update matching records state if(!ids.isEmpty()) { From a076434488f2c77393bc47163ae8e6cd09d91a5d Mon Sep 17 00:00:00 2001 From: Mukhiddin Yusupov Date: Tue, 5 Nov 2024 20:00:46 +0500 Subject: [PATCH 07/19] modsource-817: test blocking in handler --- .../AuthorityLinkChunkKafkaHandler.java | 93 +++++++++++-------- 1 file changed, 55 insertions(+), 38 deletions(-) diff --git a/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java b/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java index ac7585992..6992781df 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java @@ -13,9 +13,11 @@ import static org.folio.services.util.EventHandlingUtil.toOkapiHeaders; import static org.folio.services.util.KafkaUtil.extractHeaderValue; +import io.vertx.core.Context; import io.vertx.core.Future; import io.vertx.core.Handler; import io.vertx.core.Promise; +import 
io.vertx.core.Vertx; import io.vertx.core.json.Json; import io.vertx.kafka.client.consumer.KafkaConsumerRecord; import io.vertx.kafka.client.producer.KafkaHeader; @@ -91,47 +93,62 @@ public AuthorityLinkChunkKafkaHandler(RecordService recordService, KafkaConfig k public Future handle(KafkaConsumerRecord consumerRecord) { LOGGER.info("handle:: Start Handling kafka record: {}", consumerRecord); var userId = extractHeaderValue(XOkapiHeaders.USER_ID, consumerRecord.headers()); - - var result = mapToEvent(consumerRecord) - .compose(this::createSnapshot) - .compose(linksUpdate -> { - var instanceIds = linksUpdate.getUpdateTargets().stream() - .flatMap(updateTarget -> updateTarget.getLinks().stream() - .map(Link::getInstanceId)) - .distinct() - .toList(); - var condition = RecordDaoUtil.getExternalIdsCondition(instanceIds, IdType.INSTANCE) - .and(RecordDaoUtil.filterRecordByDeleted(false)); - var okapiHeaders = toOkapiHeaders(consumerRecord.headers(), linksUpdate.getTenant()); - Handler postUpdateHandler = recordsBatchResponse -> { - sendReports(recordsBatchResponse, linksUpdate, consumerRecord.headers()); - var marcBibUpdateStats = mapRecordsToBibUpdateEvents(recordsBatchResponse, linksUpdate); - sendEvents(marcBibUpdateStats, linksUpdate, consumerRecord); - }; - Function> recordsModifier = recordsCollection -> - this.mapRecordFieldsChanges(linksUpdate, recordsCollection, userId); - - return recordService.getAndUpdateRecordsBlocking(condition, RecordType.MARC_BIB, 0, instanceIds.size(), - recordsModifier, okapiHeaders, postUpdateHandler); - }) - .map(batchResponse -> consumerRecord.key()); - +// // var result = mapToEvent(consumerRecord) // .compose(this::createSnapshot) -// .compose(event -> retrieveRecords(event, event.getTenant()) -// .compose(recordCollection -> mapRecordFieldsChanges(event, recordCollection, userId)) -// .compose(recordCollection -> recordService.saveRecords(recordCollection, -// toOkapiHeaders(consumerRecord.headers(), event.getTenant()))) -// .map(recordsBatchResponse -> sendReports(recordsBatchResponse, event, consumerRecord.headers())) -// .map(recordsBatchResponse -> mapRecordsToBibUpdateEvents(recordsBatchResponse, event)) -// .compose(marcBibUpdates -> sendEvents(marcBibUpdates, event, consumerRecord)) -// ).recover(th -> { -// LOGGER.error("Failed to handle {} event", MARC_BIB.moduleTopicName(), th); -// return Future.failedFuture(th); -// } -// ).result(); +// .compose(linksUpdate -> { +// var instanceIds = linksUpdate.getUpdateTargets().stream() +// .flatMap(updateTarget -> updateTarget.getLinks().stream() +// .map(Link::getInstanceId)) +// .distinct() +// .toList(); +// var condition = RecordDaoUtil.getExternalIdsCondition(instanceIds, IdType.INSTANCE) +// .and(RecordDaoUtil.filterRecordByDeleted(false)); +// var okapiHeaders = toOkapiHeaders(consumerRecord.headers(), linksUpdate.getTenant()); +// Handler postUpdateHandler = recordsBatchResponse -> { +// sendReports(recordsBatchResponse, linksUpdate, consumerRecord.headers()); +// var marcBibUpdateStats = mapRecordsToBibUpdateEvents(recordsBatchResponse, linksUpdate); +// sendEvents(marcBibUpdateStats, linksUpdate, consumerRecord); +// }; +// Function> recordsModifier = recordsCollection -> +// this.mapRecordFieldsChanges(linksUpdate, recordsCollection, userId); +// +// return recordService.getAndUpdateRecordsBlocking(condition, RecordType.MARC_BIB, 0, instanceIds.size(), +// recordsModifier, okapiHeaders, postUpdateHandler); +// }) +// .map(batchResponse -> consumerRecord.key()); + + Context context = 
Vertx.currentContext(); + if(context == null) { + return Future.failedFuture("getAndUpdateRecordsBlocking must be executed by a Vertx thread"); + } + + Promise promise = Promise.promise(); + + context.owner().executeBlocking(() -> + mapToEvent(consumerRecord) + .compose(this::createSnapshot) + .compose(event -> retrieveRecords(event, event.getTenant()) + .compose(recordCollection -> mapRecordFieldsChanges(event, recordCollection, userId)) + .compose(recordCollection -> recordService.saveRecords(recordCollection, toOkapiHeaders(consumerRecord.headers(), event.getTenant()))) + .map(recordsBatchResponse -> sendReports(recordsBatchResponse, event, consumerRecord.headers())) + .map(recordsBatchResponse -> mapRecordsToBibUpdateEvents(recordsBatchResponse, event)) + .compose(marcBibUpdates -> sendEvents(marcBibUpdates, event, consumerRecord)) + ).recover(th -> { + LOGGER.error("Failed to handle {} event", MARC_BIB.moduleTopicName(), th); + return Future.failedFuture(th); + } + ) + .onComplete(asyncResult -> { + if (asyncResult.failed()) { + promise.fail(asyncResult.cause()); + } else { + promise.complete(asyncResult.result()); + } + }) + ); LOGGER.info("handle:: END Handling kafka record"); - return result; + return promise.future(); } private Future mapToEvent(KafkaConsumerRecord consumerRecord) { From 9f310bcbf62a23b05e7affa615eeec60bafde4aa Mon Sep 17 00:00:00 2001 From: Mukhiddin Yusupov Date: Tue, 5 Nov 2024 22:35:10 +0500 Subject: [PATCH 08/19] modsource-817: test blocking in handler --- .../AuthorityLinkChunkKafkaHandler.java | 108 +++++++++--------- 1 file changed, 53 insertions(+), 55 deletions(-) diff --git a/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java b/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java index 6992781df..5b8374ed9 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java @@ -13,11 +13,9 @@ import static org.folio.services.util.EventHandlingUtil.toOkapiHeaders; import static org.folio.services.util.KafkaUtil.extractHeaderValue; -import io.vertx.core.Context; import io.vertx.core.Future; import io.vertx.core.Handler; import io.vertx.core.Promise; -import io.vertx.core.Vertx; import io.vertx.core.json.Json; import io.vertx.kafka.client.consumer.KafkaConsumerRecord; import io.vertx.kafka.client.producer.KafkaHeader; @@ -93,62 +91,62 @@ public AuthorityLinkChunkKafkaHandler(RecordService recordService, KafkaConfig k public Future handle(KafkaConsumerRecord consumerRecord) { LOGGER.info("handle:: Start Handling kafka record: {}", consumerRecord); var userId = extractHeaderValue(XOkapiHeaders.USER_ID, consumerRecord.headers()); -// -// var result = mapToEvent(consumerRecord) -// .compose(this::createSnapshot) -// .compose(linksUpdate -> { -// var instanceIds = linksUpdate.getUpdateTargets().stream() -// .flatMap(updateTarget -> updateTarget.getLinks().stream() -// .map(Link::getInstanceId)) -// .distinct() -// .toList(); -// var condition = RecordDaoUtil.getExternalIdsCondition(instanceIds, IdType.INSTANCE) -// .and(RecordDaoUtil.filterRecordByDeleted(false)); -// var okapiHeaders = toOkapiHeaders(consumerRecord.headers(), linksUpdate.getTenant()); -// Handler postUpdateHandler = recordsBatchResponse -> { -// sendReports(recordsBatchResponse, linksUpdate, consumerRecord.headers()); -// var 
marcBibUpdateStats = mapRecordsToBibUpdateEvents(recordsBatchResponse, linksUpdate); -// sendEvents(marcBibUpdateStats, linksUpdate, consumerRecord); -// }; -// Function> recordsModifier = recordsCollection -> -// this.mapRecordFieldsChanges(linksUpdate, recordsCollection, userId); -// -// return recordService.getAndUpdateRecordsBlocking(condition, RecordType.MARC_BIB, 0, instanceIds.size(), -// recordsModifier, okapiHeaders, postUpdateHandler); -// }) -// .map(batchResponse -> consumerRecord.key()); - - Context context = Vertx.currentContext(); - if(context == null) { - return Future.failedFuture("getAndUpdateRecordsBlocking must be executed by a Vertx thread"); - } - - Promise promise = Promise.promise(); - context.owner().executeBlocking(() -> - mapToEvent(consumerRecord) + var result = mapToEvent(consumerRecord) .compose(this::createSnapshot) - .compose(event -> retrieveRecords(event, event.getTenant()) - .compose(recordCollection -> mapRecordFieldsChanges(event, recordCollection, userId)) - .compose(recordCollection -> recordService.saveRecords(recordCollection, toOkapiHeaders(consumerRecord.headers(), event.getTenant()))) - .map(recordsBatchResponse -> sendReports(recordsBatchResponse, event, consumerRecord.headers())) - .map(recordsBatchResponse -> mapRecordsToBibUpdateEvents(recordsBatchResponse, event)) - .compose(marcBibUpdates -> sendEvents(marcBibUpdates, event, consumerRecord)) - ).recover(th -> { - LOGGER.error("Failed to handle {} event", MARC_BIB.moduleTopicName(), th); - return Future.failedFuture(th); - } - ) - .onComplete(asyncResult -> { - if (asyncResult.failed()) { - promise.fail(asyncResult.cause()); - } else { - promise.complete(asyncResult.result()); - } - }) - ); + .compose(linksUpdate -> { + var instanceIds = linksUpdate.getUpdateTargets().stream() + .flatMap(updateTarget -> updateTarget.getLinks().stream() + .map(Link::getInstanceId)) + .distinct() + .toList(); + var condition = RecordDaoUtil.getExternalIdsCondition(instanceIds, IdType.INSTANCE) + .and(RecordDaoUtil.filterRecordByDeleted(false)); + var okapiHeaders = toOkapiHeaders(consumerRecord.headers(), linksUpdate.getTenant()); + Handler postUpdateHandler = recordsBatchResponse -> { + sendReports(recordsBatchResponse, linksUpdate, consumerRecord.headers()); + var marcBibUpdateStats = mapRecordsToBibUpdateEvents(recordsBatchResponse, linksUpdate); + sendEvents(marcBibUpdateStats, linksUpdate, consumerRecord); + }; + Function> recordsModifier = recordsCollection -> + this.mapRecordFieldsChanges(linksUpdate, recordsCollection, userId); + + return recordService.getAndUpdateRecordsBlocking(condition, RecordType.MARC_BIB, 0, instanceIds.size(), + recordsModifier, okapiHeaders, postUpdateHandler); + }) + .map(batchResponse -> consumerRecord.key()); + +// Context context = Vertx.currentContext(); +// if(context == null) { +// return Future.failedFuture("getAndUpdateRecordsBlocking must be executed by a Vertx thread"); +// } +// +// Promise promise = Promise.promise(); +// +// context.owner().executeBlocking(() -> +// mapToEvent(consumerRecord) +// .compose(this::createSnapshot) +// .compose(event -> retrieveRecords(event, event.getTenant()) +// .compose(recordCollection -> mapRecordFieldsChanges(event, recordCollection, userId)) +// .compose(recordCollection -> recordService.saveRecords(recordCollection, toOkapiHeaders(consumerRecord.headers(), event.getTenant()))) +// .map(recordsBatchResponse -> sendReports(recordsBatchResponse, event, consumerRecord.headers())) +// .map(recordsBatchResponse -> 
mapRecordsToBibUpdateEvents(recordsBatchResponse, event)) +// .compose(marcBibUpdates -> sendEvents(marcBibUpdates, event, consumerRecord)) +// ).recover(th -> { +// LOGGER.error("Failed to handle {} event", MARC_BIB.moduleTopicName(), th); +// return Future.failedFuture(th); +// } +// ) +// .onComplete(asyncResult -> { +// if (asyncResult.failed()) { +// promise.fail(asyncResult.cause()); +// } else { +// promise.complete(asyncResult.result()); +// } +// }) +// ); LOGGER.info("handle:: END Handling kafka record"); - return promise.future(); + return result; //promise.future(); } private Future mapToEvent(KafkaConsumerRecord consumerRecord) { From 5e8b715c0ddb57681216cd7353dc762223827803 Mon Sep 17 00:00:00 2001 From: Mukhiddin Yusupov Date: Wed, 6 Nov 2024 13:18:51 +0500 Subject: [PATCH 09/19] modsource-817: add separate save records method for running blocking code in order --- .../AuthorityLinkChunkKafkaHandler.java | 91 ++--- .../main/java/org/folio/dao/RecordDao.java | 17 +- .../java/org/folio/dao/RecordDaoImpl.java | 381 +++++++++--------- .../org/folio/services/RecordService.java | 37 +- .../org/folio/services/RecordServiceImpl.java | 45 +-- .../AuthorityLinkChunkConsumersVerticle.java | 2 +- 6 files changed, 270 insertions(+), 303 deletions(-) diff --git a/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java b/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java index 5b8374ed9..48dc5b675 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java @@ -14,7 +14,6 @@ import static org.folio.services.util.KafkaUtil.extractHeaderValue; import io.vertx.core.Future; -import io.vertx.core.Handler; import io.vertx.core.Promise; import io.vertx.core.json.Json; import io.vertx.kafka.client.consumer.KafkaConsumerRecord; @@ -31,7 +30,6 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; import java.util.concurrent.atomic.AtomicLong; -import java.util.function.Function; import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; @@ -89,64 +87,51 @@ public AuthorityLinkChunkKafkaHandler(RecordService recordService, KafkaConfig k @Override public Future handle(KafkaConsumerRecord consumerRecord) { - LOGGER.info("handle:: Start Handling kafka record: {}", consumerRecord); var userId = extractHeaderValue(XOkapiHeaders.USER_ID, consumerRecord.headers()); + LOGGER.info("handle:: Start Handling kafka record: {}", consumerRecord); var result = mapToEvent(consumerRecord) .compose(this::createSnapshot) - .compose(linksUpdate -> { - var instanceIds = linksUpdate.getUpdateTargets().stream() - .flatMap(updateTarget -> updateTarget.getLinks().stream() - .map(Link::getInstanceId)) - .distinct() - .toList(); - var condition = RecordDaoUtil.getExternalIdsCondition(instanceIds, IdType.INSTANCE) - .and(RecordDaoUtil.filterRecordByDeleted(false)); - var okapiHeaders = toOkapiHeaders(consumerRecord.headers(), linksUpdate.getTenant()); - Handler postUpdateHandler = recordsBatchResponse -> { - sendReports(recordsBatchResponse, linksUpdate, consumerRecord.headers()); - var marcBibUpdateStats = mapRecordsToBibUpdateEvents(recordsBatchResponse, linksUpdate); - sendEvents(marcBibUpdateStats, linksUpdate, consumerRecord); - }; - Function> recordsModifier = 
recordsCollection -> - this.mapRecordFieldsChanges(linksUpdate, recordsCollection, userId); - - return recordService.getAndUpdateRecordsBlocking(condition, RecordType.MARC_BIB, 0, instanceIds.size(), - recordsModifier, okapiHeaders, postUpdateHandler); - }) - .map(batchResponse -> consumerRecord.key()); - -// Context context = Vertx.currentContext(); -// if(context == null) { -// return Future.failedFuture("getAndUpdateRecordsBlocking must be executed by a Vertx thread"); -// } -// -// Promise promise = Promise.promise(); + .compose(event -> retrieveRecords(event, event.getTenant()) + .compose(recordCollection -> mapRecordFieldsChanges(event, recordCollection, userId)) + .compose(recordCollection -> recordService.saveRecordsBlocking(recordCollection, true, + toOkapiHeaders(consumerRecord.headers(), event.getTenant()))) + .map(recordsBatchResponse -> sendReports(recordsBatchResponse, event, consumerRecord.headers())) + .map(recordsBatchResponse -> mapRecordsToBibUpdateEvents(recordsBatchResponse, event)) + .compose(marcBibUpdates -> sendEvents(marcBibUpdates, event, consumerRecord)) + ).recover(th -> { + LOGGER.error("Failed to handle {} event", MARC_BIB.moduleTopicName(), th); + return Future.failedFuture(th); + } + ); +// var userId = extractHeaderValue(XOkapiHeaders.USER_ID, consumerRecord.headers()); // -// context.owner().executeBlocking(() -> -// mapToEvent(consumerRecord) +// var result = mapToEvent(consumerRecord) // .compose(this::createSnapshot) -// .compose(event -> retrieveRecords(event, event.getTenant()) -// .compose(recordCollection -> mapRecordFieldsChanges(event, recordCollection, userId)) -// .compose(recordCollection -> recordService.saveRecords(recordCollection, toOkapiHeaders(consumerRecord.headers(), event.getTenant()))) -// .map(recordsBatchResponse -> sendReports(recordsBatchResponse, event, consumerRecord.headers())) -// .map(recordsBatchResponse -> mapRecordsToBibUpdateEvents(recordsBatchResponse, event)) -// .compose(marcBibUpdates -> sendEvents(marcBibUpdates, event, consumerRecord)) -// ).recover(th -> { -// LOGGER.error("Failed to handle {} event", MARC_BIB.moduleTopicName(), th); -// return Future.failedFuture(th); -// } -// ) -// .onComplete(asyncResult -> { -// if (asyncResult.failed()) { -// promise.fail(asyncResult.cause()); -// } else { -// promise.complete(asyncResult.result()); -// } -// }) -// ); +// .compose(linksUpdate -> { +// var instanceIds = linksUpdate.getUpdateTargets().stream() +// .flatMap(updateTarget -> updateTarget.getLinks().stream() +// .map(Link::getInstanceId)) +// .distinct() +// .toList(); +// var condition = RecordDaoUtil.getExternalIdsCondition(instanceIds, IdType.INSTANCE) +// .and(RecordDaoUtil.filterRecordByDeleted(false)); +// var okapiHeaders = toOkapiHeaders(consumerRecord.headers(), linksUpdate.getTenant()); +// Handler postUpdateHandler = recordsBatchResponse -> { +// sendReports(recordsBatchResponse, linksUpdate, consumerRecord.headers()); +// var marcBibUpdateStats = mapRecordsToBibUpdateEvents(recordsBatchResponse, linksUpdate); +// sendEvents(marcBibUpdateStats, linksUpdate, consumerRecord); +// }; +// Function> recordsModifier = recordsCollection -> +// this.mapRecordFieldsChanges(linksUpdate, recordsCollection, userId); +// +// return recordService.getAndUpdateRecordsBlocking(condition, RecordType.MARC_BIB, 0, instanceIds.size(), +// recordsModifier, okapiHeaders, postUpdateHandler); +// }) +// .map(batchResponse -> consumerRecord.key()); + LOGGER.info("handle:: END Handling kafka record"); - return result; 
//promise.future(); + return result; } private Future mapToEvent(KafkaConsumerRecord consumerRecord) { diff --git a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDao.java b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDao.java index 49f7040c9..11975e992 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDao.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDao.java @@ -208,7 +208,9 @@ Future getMatchedRecordsIdentifiers(MatchField mat Future saveRecord(ReactiveClassicGenericQueryExecutor txQE, Record record, Map okapiHeaders); /** - * Saves {@link RecordCollection} to the db + * Saves {@link RecordCollection} to the db. + * This is same as calling {@link RecordDao#saveRecordsBlocking(RecordCollection, boolean, Map)} specifying + * orderedBLocking = false. * * @param recordCollection Record collection to save * @param okapiHeaders okapi headers @@ -216,6 +218,19 @@ Future getMatchedRecordsIdentifiers(MatchField mat */ Future saveRecords(RecordCollection recordCollection, Map okapiHeaders); + /** + * Saves {@link RecordCollection} to the db + * + * @param recordCollection Record collection to save + * @param okapiHeaders okapi headers + * @param orderedBlocking boolean indicator to control if blocking logic needs to be run sequentially (true) or + * concurrently (false) by the Verticle. + * @return future with saved {@link RecordsBatchResponse} + */ + Future saveRecordsBlocking(RecordCollection recordCollection, + boolean orderedBlocking, + Map okapiHeaders); + /** * Updates {{@link Record} in the db * diff --git a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java index 7dbee7242..baa931589 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java @@ -766,221 +766,228 @@ public Future saveRecord(ReactiveClassicGenericQueryExecutor txQE, Recor @Override public Future saveRecords(RecordCollection recordCollection, Map okapiHeaders) { + return saveRecordsBlocking(recordCollection, false, okapiHeaders); + } + + @Override + public Future saveRecordsBlocking(RecordCollection recordCollection, + boolean orderedBlocking, + Map okapiHeaders) { var tenantId = okapiHeaders.get(OKAPI_TENANT_HEADER); logRecordCollection("saveRecords:: Saving", recordCollection, tenantId); Promise finalPromise = Promise.promise(); Context context = Vertx.currentContext(); if(context == null) return Future.failedFuture("saveRecords must be executed by a Vertx thread"); context.owner().executeBlocking(promise -> { - Set matchedIds = new HashSet<>(); - Set snapshotIds = new HashSet<>(); - Set recordTypes = new HashSet<>(); - - List dbRecords = new ArrayList<>(); - List dbRawRecords = new ArrayList<>(); - List> dbParsedRecords = new ArrayList<>(); - List dbErrorRecords = new ArrayList<>(); - - List errorMessages = new ArrayList<>(); - - recordCollection.getRecords() - .stream() - .map(RecordDaoUtil::ensureRecordHasId) - .map(RecordDaoUtil::ensureRecordHasMatchedId) - .map(RecordDaoUtil::ensureRecordHasSuppressDiscovery) - .map(RecordDaoUtil::ensureRecordForeignKeys) - .forEach(record -> { - // collect unique matched ids to query to determine generation - matchedIds.add(UUID.fromString(record.getMatchedId())); - - // make sure only one snapshot id - snapshotIds.add(record.getSnapshotId()); - if 
(snapshotIds.size() > 1) { - throw new BadRequestException("Batch record collection only supports single snapshot"); - } + Set matchedIds = new HashSet<>(); + Set snapshotIds = new HashSet<>(); + Set recordTypes = new HashSet<>(); - if(Objects.nonNull(record.getRecordType())) { - recordTypes.add(record.getRecordType().name()); - } else { - throw new BadRequestException(StringUtils.defaultIfEmpty(record.getErrorRecord().getDescription(), String.format("Record with id %s has not record type", record.getId()))); - } + List dbRecords = new ArrayList<>(); + List dbRawRecords = new ArrayList<>(); + List> dbParsedRecords = new ArrayList<>(); + List dbErrorRecords = new ArrayList<>(); - // make sure only one record type - if (recordTypes.size() > 1) { - throw new BadRequestException("Batch record collection only supports single record type"); - } + List errorMessages = new ArrayList<>(); - // if record has parsed record, validate by attempting format - if (Objects.nonNull(record.getParsedRecord())) { - try { - RecordType recordType = toRecordType(record.getRecordType().name()); - recordType.formatRecord(record); - Record2 dbParsedRecord = recordType.toDatabaseRecord2(record.getParsedRecord()); - dbParsedRecords.add(dbParsedRecord); - } catch (Exception e) { - // create error record and remove from record - Object content = Objects.nonNull(record.getParsedRecord()) - ? record.getParsedRecord().getContent() - : null; - ErrorRecord errorRecord = new ErrorRecord() - .withId(record.getId()) - .withDescription(e.getMessage()) - .withContent(content); - errorMessages.add(format(INVALID_PARSED_RECORD_MESSAGE_TEMPLATE, record.getId(), e.getMessage())); - record.withErrorRecord(errorRecord) - .withParsedRecord(null) - .withLeaderRecordStatus(null); + recordCollection.getRecords() + .stream() + .map(RecordDaoUtil::ensureRecordHasId) + .map(RecordDaoUtil::ensureRecordHasMatchedId) + .map(RecordDaoUtil::ensureRecordHasSuppressDiscovery) + .map(RecordDaoUtil::ensureRecordForeignKeys) + .forEach(record -> { + // collect unique matched ids to query to determine generation + matchedIds.add(UUID.fromString(record.getMatchedId())); + + // make sure only one snapshot id + snapshotIds.add(record.getSnapshotId()); + if (snapshotIds.size() > 1) { + throw new BadRequestException("Batch record collection only supports single snapshot"); } - } - if (Objects.nonNull(record.getRawRecord())) { - dbRawRecords.add(RawRecordDaoUtil.toDatabaseRawRecord(record.getRawRecord())); - } - if (Objects.nonNull(record.getErrorRecord())) { - dbErrorRecords.add(ErrorRecordDaoUtil.toDatabaseErrorRecord(record.getErrorRecord())); - } - dbRecords.add(RecordDaoUtil.toDatabaseRecord(record)); - }); - - UUID snapshotId = UUID.fromString(snapshotIds.stream().findFirst().orElseThrow()); - RecordType recordType = toRecordType(recordTypes.stream().findFirst().orElseThrow()); + if(Objects.nonNull(record.getRecordType())) { + recordTypes.add(record.getRecordType().name()); + } else { + throw new BadRequestException(StringUtils.defaultIfEmpty(record.getErrorRecord().getDescription(), String.format("Record with id %s has not record type", record.getId()))); + } - try (Connection connection = getConnection(tenantId)) { - DSL.using(connection).transaction(ctx -> { - DSLContext dsl = DSL.using(ctx); + // make sure only one record type + if (recordTypes.size() > 1) { + throw new BadRequestException("Batch record collection only supports single record type"); + } - // validate snapshot - Optional snapshot = DSL.using(ctx).selectFrom(SNAPSHOTS_LB) - 
.where(SNAPSHOTS_LB.ID.eq(snapshotId)) - .fetchOptional(); - if (snapshot.isPresent()) { - if (Objects.isNull(snapshot.get().getProcessingStartedDate())) { - throw new BadRequestException(format(SNAPSHOT_NOT_STARTED_MESSAGE_TEMPLATE, snapshot.get().getStatus())); + // if record has parsed record, validate by attempting format + if (Objects.nonNull(record.getParsedRecord())) { + try { + RecordType recordType = toRecordType(record.getRecordType().name()); + recordType.formatRecord(record); + Record2 dbParsedRecord = recordType.toDatabaseRecord2(record.getParsedRecord()); + dbParsedRecords.add(dbParsedRecord); + } catch (Exception e) { + // create error record and remove from record + Object content = Objects.nonNull(record.getParsedRecord()) + ? record.getParsedRecord().getContent() + : null; + ErrorRecord errorRecord = new ErrorRecord() + .withId(record.getId()) + .withDescription(e.getMessage()) + .withContent(content); + errorMessages.add(format(INVALID_PARSED_RECORD_MESSAGE_TEMPLATE, record.getId(), e.getMessage())); + record.withErrorRecord(errorRecord) + .withParsedRecord(null) + .withLeaderRecordStatus(null); + } } - } else { - throw new NotFoundException(format(SNAPSHOT_NOT_FOUND_TEMPLATE, snapshotId)); - } + if (Objects.nonNull(record.getRawRecord())) { + dbRawRecords.add(RawRecordDaoUtil.toDatabaseRawRecord(record.getRawRecord())); + } + if (Objects.nonNull(record.getErrorRecord())) { + dbErrorRecords.add(ErrorRecordDaoUtil.toDatabaseErrorRecord(record.getErrorRecord())); + } + dbRecords.add(RecordDaoUtil.toDatabaseRecord(record)); + }); - List ids = new ArrayList<>(); - Map matchedGenerations = new HashMap<>(); + UUID snapshotId = UUID.fromString(snapshotIds.stream().findFirst().orElseThrow()); - // lookup latest generation by matched id and committed snapshot updated before current snapshot - dsl.select(RECORDS_LB.MATCHED_ID, RECORDS_LB.ID, RECORDS_LB.GENERATION) - .distinctOn(RECORDS_LB.MATCHED_ID) - .from(RECORDS_LB) - .innerJoin(SNAPSHOTS_LB).on(RECORDS_LB.SNAPSHOT_ID.eq(SNAPSHOTS_LB.ID)) - .where(RECORDS_LB.MATCHED_ID.in(matchedIds) - .and(SNAPSHOTS_LB.STATUS.in(JobExecutionStatus.COMMITTED, JobExecutionStatus.ERROR, JobExecutionStatus.CANCELLED)) - .and(SNAPSHOTS_LB.UPDATED_DATE.lessThan(dsl - .select(SNAPSHOTS_LB.PROCESSING_STARTED_DATE) - .from(SNAPSHOTS_LB) - .where(SNAPSHOTS_LB.ID.eq(snapshotId))))) - .orderBy(RECORDS_LB.MATCHED_ID.asc(), RECORDS_LB.GENERATION.desc()) - .fetchStream().forEach(r -> { - UUID id = r.get(RECORDS_LB.ID); - UUID matchedId = r.get(RECORDS_LB.MATCHED_ID); - int generation = r.get(RECORDS_LB.GENERATION); - ids.add(id); - matchedGenerations.put(matchedId, generation); - }); + RecordType recordType = toRecordType(recordTypes.stream().findFirst().orElseThrow()); - LOG.info("saveRecords:: matched ids: {}", matchedIds); - LOG.info("saveRecords:: records ids: {}", ids); - LOG.info("saveRecords:: generations: {}", matchedGenerations); + try (Connection connection = getConnection(tenantId)) { + DSL.using(connection).transaction(ctx -> { + DSLContext dsl = DSL.using(ctx); - // update matching records state - if(!ids.isEmpty()) - { - dsl.update(RECORDS_LB) - .set(RECORDS_LB.STATE, RecordState.OLD) - .where(RECORDS_LB.ID.in(ids)) - .execute(); - } + // validate snapshot + Optional snapshot = DSL.using(ctx).selectFrom(SNAPSHOTS_LB) + .where(SNAPSHOTS_LB.ID.eq(snapshotId)) + .fetchOptional(); + if (snapshot.isPresent()) { + if (Objects.isNull(snapshot.get().getProcessingStartedDate())) { + throw new 
BadRequestException(format(SNAPSHOT_NOT_STARTED_MESSAGE_TEMPLATE, snapshot.get().getStatus())); + } + } else { + throw new NotFoundException(format(SNAPSHOT_NOT_FOUND_TEMPLATE, snapshotId)); + } - // batch insert records updating generation if required - List recordsLoadingErrors = dsl.loadInto(RECORDS_LB) - .batchAfter(1000) - .bulkAfter(500) - .commitAfter(1000) - .onErrorAbort() - .loadRecords(dbRecords.stream() - .map(record -> { - Integer generation = matchedGenerations.get(record.getMatchedId()); - if (Objects.nonNull(generation)) { - record.setGeneration(generation + 1); - } else if (Objects.isNull(record.getGeneration())) { - record.setGeneration(0); - } - return record; - }) - .toList()) - .fieldsCorresponding() - .execute() - .errors(); - - recordsLoadingErrors.forEach(error -> { - if (error.exception().sqlState().equals(UNIQUE_VIOLATION_SQL_STATE)) { - throw new DuplicateEventException("SQL Unique constraint violation prevented repeatedly saving the record"); + List ids = new ArrayList<>(); + Map matchedGenerations = new HashMap<>(); + + // lookup latest generation by matched id and committed snapshot updated before current snapshot + dsl.select(RECORDS_LB.MATCHED_ID, RECORDS_LB.ID, RECORDS_LB.GENERATION) + .distinctOn(RECORDS_LB.MATCHED_ID) + .from(RECORDS_LB) + .innerJoin(SNAPSHOTS_LB).on(RECORDS_LB.SNAPSHOT_ID.eq(SNAPSHOTS_LB.ID)) + .where(RECORDS_LB.MATCHED_ID.in(matchedIds) + .and(SNAPSHOTS_LB.STATUS.in(JobExecutionStatus.COMMITTED, JobExecutionStatus.ERROR, JobExecutionStatus.CANCELLED)) + .and(SNAPSHOTS_LB.UPDATED_DATE.lessThan(dsl + .select(SNAPSHOTS_LB.PROCESSING_STARTED_DATE) + .from(SNAPSHOTS_LB) + .where(SNAPSHOTS_LB.ID.eq(snapshotId))))) + .orderBy(RECORDS_LB.MATCHED_ID.asc(), RECORDS_LB.GENERATION.desc()) + .fetchStream().forEach(r -> { + UUID id = r.get(RECORDS_LB.ID); + UUID matchedId = r.get(RECORDS_LB.MATCHED_ID); + int generation = r.get(RECORDS_LB.GENERATION); + ids.add(id); + matchedGenerations.put(matchedId, generation); + }); + + LOG.info("saveRecords:: matched ids: {}", matchedIds); + LOG.info("saveRecords:: records ids: {}", ids); + LOG.info("saveRecords:: generations: {}", matchedGenerations); + + // update matching records state + if(!ids.isEmpty()) + { + dsl.update(RECORDS_LB) + .set(RECORDS_LB.STATE, RecordState.OLD) + .where(RECORDS_LB.ID.in(ids)) + .execute(); } - LOG.warn("saveRecords:: Error occurred on batch execution: {}", error.exception().getCause().getMessage()); - LOG.debug("saveRecords:: Failed to execute statement from batch: {}", error.query()); - }); - // batch insert raw records - dsl.loadInto(RAW_RECORDS_LB) - .batchAfter(250) - .commitAfter(1000) - .onDuplicateKeyUpdate() - .onErrorAbort() - .loadRecords(dbRawRecords) - .fieldsCorresponding() - .execute(); - - // batch insert parsed records - recordType.toLoaderOptionsStep(dsl) - .batchAfter(250) - .commitAfter(1000) - .onDuplicateKeyUpdate() - .onErrorAbort() - .loadRecords(dbParsedRecords) - .fieldsCorresponding() - .execute(); - - if (!dbErrorRecords.isEmpty()) { - // batch insert error records - dsl.loadInto(ERROR_RECORDS_LB) + // batch insert records updating generation if required + List recordsLoadingErrors = dsl.loadInto(RECORDS_LB) + .batchAfter(1000) + .bulkAfter(500) + .commitAfter(1000) + .onErrorAbort() + .loadRecords(dbRecords.stream() + .map(record -> { + Integer generation = matchedGenerations.get(record.getMatchedId()); + if (Objects.nonNull(generation)) { + record.setGeneration(generation + 1); + } else if (Objects.isNull(record.getGeneration())) { + 
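                    // neither a committed prior generation for this matched id nor a generation on the
                    // incoming record itself was found, so the record starts at generation 0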
record.setGeneration(0); + } + return record; + }) + .toList()) + .fieldsCorresponding() + .execute() + .errors(); + + recordsLoadingErrors.forEach(error -> { + if (error.exception().sqlState().equals(UNIQUE_VIOLATION_SQL_STATE)) { + throw new DuplicateEventException("SQL Unique constraint violation prevented repeatedly saving the record"); + } + LOG.warn("saveRecords:: Error occurred on batch execution: {}", error.exception().getCause().getMessage()); + LOG.debug("saveRecords:: Failed to execute statement from batch: {}", error.query()); + }); + + // batch insert raw records + dsl.loadInto(RAW_RECORDS_LB) .batchAfter(250) .commitAfter(1000) .onDuplicateKeyUpdate() .onErrorAbort() - .loadRecords(dbErrorRecords) + .loadRecords(dbRawRecords) .fieldsCorresponding() .execute(); - } - promise.complete(new RecordsBatchResponse() - .withRecords(recordCollection.getRecords()) - .withTotalRecords(recordCollection.getRecords().size()) - .withErrorMessages(errorMessages)); - }); - } catch (DuplicateEventException e) { - LOG.info("saveRecords:: Skipped saving records due to duplicate event: {}", e.getMessage()); - promise.fail(e); - } catch (SQLException | DataAccessException e) { - LOG.warn("saveRecords:: Failed to save records", e); - promise.fail(e.getCause()); - } - }, - false, - r -> { - if (r.failed()) { - LOG.warn("saveRecords:: Error during batch record save", r.cause()); - finalPromise.fail(r.cause()); - } else { - LOG.debug("saveRecords:: batch record save was successful"); - finalPromise.complete(r.result()); - } - }); + // batch insert parsed records + recordType.toLoaderOptionsStep(dsl) + .batchAfter(250) + .commitAfter(1000) + .onDuplicateKeyUpdate() + .onErrorAbort() + .loadRecords(dbParsedRecords) + .fieldsCorresponding() + .execute(); + + if (!dbErrorRecords.isEmpty()) { + // batch insert error records + dsl.loadInto(ERROR_RECORDS_LB) + .batchAfter(250) + .commitAfter(1000) + .onDuplicateKeyUpdate() + .onErrorAbort() + .loadRecords(dbErrorRecords) + .fieldsCorresponding() + .execute(); + } + + promise.complete(new RecordsBatchResponse() + .withRecords(recordCollection.getRecords()) + .withTotalRecords(recordCollection.getRecords().size()) + .withErrorMessages(errorMessages)); + }); + } catch (DuplicateEventException e) { + LOG.info("saveRecords:: Skipped saving records due to duplicate event: {}", e.getMessage()); + promise.fail(e); + } catch (SQLException | DataAccessException e) { + LOG.warn("saveRecords:: Failed to save records", e); + promise.fail(e.getCause()); + } + }, + orderedBlocking, + r -> { + if (r.failed()) { + LOG.warn("saveRecords:: Error during batch record save", r.cause()); + finalPromise.fail(r.cause()); + } else { + LOG.debug("saveRecords:: batch record save was successful"); + finalPromise.complete(r.result()); + } + }); return finalPromise.future() .onSuccess(response -> response.getRecords() diff --git a/mod-source-record-storage-server/src/main/java/org/folio/services/RecordService.java b/mod-source-record-storage-server/src/main/java/org/folio/services/RecordService.java index bdcba0b9b..3506cf11c 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/services/RecordService.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/services/RecordService.java @@ -4,9 +4,6 @@ import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.function.Function; - -import io.vertx.core.Handler; import io.vertx.sqlclient.Row; import org.folio.dao.util.IdType; import org.folio.dao.util.RecordType; @@ -85,6 +82,19 @@ 
public interface RecordService { */ Future saveRecords(RecordCollection recordsCollection, Map okapiHeaders); + /** + * Saves collection of records + * + * @param recordsCollection records to save + * @param orderedBlocking boolean indicator to control if blocking logic needs to be run sequentially (true) or + * concurrently (false) by the Verticle. + * @param okapiHeaders okapi headers + * @return future with response containing list of successfully saved records and error messages for records that were not saved + */ + Future saveRecordsBlocking(RecordCollection recordsCollection, + boolean orderedBlocking, + Map okapiHeaders); + /** * Updates record with given id * @@ -270,26 +280,5 @@ public interface RecordService { */ Future updateRecordsState(String matchedId, RecordState state, RecordType recordType, String tenantId); - /** - * Searches for {@link Record} by {@link Condition} of {@link RecordType} with given offset and limit, applies the - * function to modify the found records and persist the updates. Once records persisting is complete it applies the - * provided post-update handler on operation's result. - * - * @param condition query where condition - * @param recordType record type - * @param offset starting index in a list of results - * @param limit limit of records for pagination - * @param recordsModifier function to apply modifications on fetched records - * @param okapiHeaders okapi headers - * @param postUpdateHandler handler on the updated records - * @return future with response containing list of successfully updated records and error messages for records that - * were not updated - */ - Future getAndUpdateRecordsBlocking(Condition condition, RecordType recordType, - int offset, int limit, - Function> recordsModifier, - Map okapiHeaders, - Handler postUpdateHandler); - Future deleteRecordById(String id, IdType idType, Map okapiHeaders); } diff --git a/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java b/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java index 6af009584..0d528ac0e 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java @@ -23,16 +23,12 @@ import io.reactivex.Flowable; import io.vertx.core.AsyncResult; -import io.vertx.core.Context; import io.vertx.core.Future; -import io.vertx.core.Handler; import io.vertx.core.Promise; -import io.vertx.core.Vertx; import io.vertx.core.json.JsonArray; import io.vertx.core.json.JsonObject; import io.vertx.pgclient.PgException; import io.vertx.sqlclient.Row; - import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -42,11 +38,9 @@ import java.util.Objects; import java.util.Optional; import java.util.UUID; -import java.util.function.Function; import java.util.stream.Collectors; import javax.ws.rs.BadRequestException; import javax.ws.rs.NotFoundException; - import net.sf.jsqlparser.JSQLParserException; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; @@ -124,36 +118,6 @@ public Future getRecords(Condition condition, RecordType recor return recordDao.getRecords(condition, recordType, orderFields, offset, limit, tenantId); } - @Override - public Future getAndUpdateRecordsBlocking(Condition condition, RecordType recordType, - int offset, int limit, - Function> recordsModifier, - Map okapiHeaders, - Handler postUpdateHandler) { - var 
tenantId = okapiHeaders.get(OKAPI_TENANT_HEADER); - Promise promise = Promise.promise(); - Context context = Vertx.currentContext(); - if(context == null) { - return Future.failedFuture("getAndUpdateRecordsBlocking must be executed by a Vertx thread"); - } - - context.owner().executeBlocking(() -> - getRecords(condition, recordType, Collections.emptyList(), offset, limit, tenantId) - .compose(recordsModifier::apply) - .compose(recordsCollection -> saveRecords(recordsCollection, okapiHeaders)) - .onComplete(asyncResult -> { - if (asyncResult.failed()) { - promise.fail(asyncResult.cause()); - } else { - postUpdateHandler.handle(asyncResult.result()); - promise.complete(asyncResult.result()); - } - }) - ); - - return promise.future(); - } - @Override public Flowable streamRecords(Condition condition, RecordType recordType, Collection> orderFields, int offset, int limit, String tenantId) { return recordDao.streamRecords(condition, recordType, orderFields, offset, limit, tenantId); @@ -206,6 +170,13 @@ public Future saveRecord(Record record, Map okapiHeaders @Override public Future saveRecords(RecordCollection recordCollection, Map okapiHeaders) { + return saveRecordsBlocking(recordCollection, false, okapiHeaders); + } + + @Override + public Future saveRecordsBlocking(RecordCollection recordCollection, + boolean orderedBlocking, + Map okapiHeaders) { if (recordCollection.getRecords().isEmpty()) { Promise promise = Promise.promise(); promise.complete(new RecordsBatchResponse().withTotalRecords(0)); @@ -216,7 +187,7 @@ public Future saveRecords(RecordCollection recordCollectio okapiHeaders.get(OKAPI_TENANT_HEADER)))); return GenericCompositeFuture.all(setMatchedIdsFutures) .compose(ar -> ar.succeeded() ? - recordDao.saveRecords(recordCollection, okapiHeaders) + recordDao.saveRecordsBlocking(recordCollection, orderedBlocking, okapiHeaders) : Future.failedFuture(ar.cause())) .recover(RecordServiceImpl::mapToDuplicateExceptionIfNeeded); } diff --git a/mod-source-record-storage-server/src/main/java/org/folio/verticle/consumers/AuthorityLinkChunkConsumersVerticle.java b/mod-source-record-storage-server/src/main/java/org/folio/verticle/consumers/AuthorityLinkChunkConsumersVerticle.java index 8094b3841..643d50666 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/verticle/consumers/AuthorityLinkChunkConsumersVerticle.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/verticle/consumers/AuthorityLinkChunkConsumersVerticle.java @@ -19,7 +19,7 @@ public class AuthorityLinkChunkConsumersVerticle extends AbstractConsumerVerticl private final AuthorityLinkChunkKafkaHandler kafkaHandler; - @Value("${srs.kafka.AuthorityLinkChunkConsumer.loadLimit:2}") + @Value("${srs.kafka.AuthorityLinkChunkConsumer.loadLimit:1}") private int loadLimit; @Autowired From e627ecaaa0c3b04acd40a3c6f4a6c61f69a6d7fc Mon Sep 17 00:00:00 2001 From: Mukhiddin Yusupov Date: Wed, 6 Nov 2024 14:05:44 +0500 Subject: [PATCH 10/19] modsource-817: add new save records method --- NEWS.md | 3 + mod-source-record-storage-client/pom.xml | 2 +- mod-source-record-storage-server/pom.xml | 2 +- .../AuthorityLinkChunkKafkaHandler.java | 222 +++---- .../main/java/org/folio/dao/RecordDao.java | 19 +- .../java/org/folio/dao/RecordDaoImpl.java | 553 ++++++++++-------- .../folio/dao/util/ErrorRecordDaoUtil.java | 30 +- .../folio/dao/util/ParsedRecordDaoUtil.java | 34 +- .../org/folio/dao/util/RawRecordDaoUtil.java | 29 +- .../org/folio/dao/util/RecordDaoUtil.java | 37 +- .../org/folio/dao/util/RecordMappers.java | 39 ++ 
.../org/folio/services/RecordService.java | 22 +- .../org/folio/services/RecordServiceImpl.java | 55 +- .../entities/RecordsModifierOperator.java | 24 + .../services/util/EventHandlingUtil.java | 5 +- .../AuthorityLinkChunkConsumersVerticle.java | 2 +- .../org/folio/services/RecordServiceTest.java | 75 ++- pom.xml | 2 +- 18 files changed, 685 insertions(+), 470 deletions(-) create mode 100644 mod-source-record-storage-server/src/main/java/org/folio/dao/util/RecordMappers.java create mode 100644 mod-source-record-storage-server/src/main/java/org/folio/services/entities/RecordsModifierOperator.java diff --git a/NEWS.md b/NEWS.md index 1cc6e4df4..4698ad081 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,3 +1,6 @@ +## 2024-XX-XX 5.10.0-SNAPSHOT +* [MODSOURCE-817](https://folio-org.atlassian.net/browse/MODSOURCE-817) Fix data consistency in handling and updating Marc Bib records for links.instance-authority event + ## 2024-10-28 5.9.0 * [MODSOURCE-767](https://folio-org.atlassian.net/browse/MODSOURCE-767) Single record overlay creates duplicate OCLC#/035 * [MODSOURCE-756](https://issues.folio.org/browse/MODSOURCE-756) After setting an instance as marked for deletion it is no longer editable in quickmarc diff --git a/mod-source-record-storage-client/pom.xml b/mod-source-record-storage-client/pom.xml index 6be29123e..a3df89d30 100644 --- a/mod-source-record-storage-client/pom.xml +++ b/mod-source-record-storage-client/pom.xml @@ -6,7 +6,7 @@ org.folio mod-source-record-storage - 5.10.0-SNAPSHOT + 5.9.0-SNAPSHOT diff --git a/mod-source-record-storage-server/pom.xml b/mod-source-record-storage-server/pom.xml index 34f022480..f94f01671 100644 --- a/mod-source-record-storage-server/pom.xml +++ b/mod-source-record-storage-server/pom.xml @@ -5,7 +5,7 @@ org.folio mod-source-record-storage - 5.10.0-SNAPSHOT + 5.9.0-SNAPSHOT diff --git a/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java b/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java index 48dc5b675..aa91995dc 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java @@ -1,6 +1,5 @@ package org.folio.consumers; -import static java.util.Collections.emptyList; import static java.util.Objects.nonNull; import static org.apache.commons.collections4.CollectionUtils.isEmpty; import static org.apache.commons.lang3.StringUtils.EMPTY; @@ -34,8 +33,6 @@ import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.folio.dao.util.IdType; -import org.folio.dao.util.RecordDaoUtil; import org.folio.dao.util.RecordType; import org.folio.kafka.AsyncRecordHandler; import org.folio.kafka.KafkaConfig; @@ -54,6 +51,7 @@ import org.folio.rest.jaxrs.model.UpdateTarget; import org.folio.services.RecordService; import org.folio.services.SnapshotService; +import org.folio.services.entities.RecordsModifierOperator; import org.folio.services.handlers.links.DeleteLinkProcessor; import org.folio.services.handlers.links.LinkProcessor; import org.folio.services.handlers.links.UpdateLinkProcessor; @@ -88,49 +86,25 @@ public AuthorityLinkChunkKafkaHandler(RecordService recordService, KafkaConfig k @Override public Future handle(KafkaConsumerRecord consumerRecord) { var userId = extractHeaderValue(XOkapiHeaders.USER_ID, consumerRecord.headers()); 
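    // The RecordsModifierOperator handed to recordService.saveRecordsByExternalIds below is, judging by its
    // usage here (a lambda over RecordCollection that is later invoked via apply in RecordDaoImpl), presumably
    // a functional interface along these lines; this is only a sketch, since the new source file itself is not
    // part of this hunk:
    //
    //   @FunctionalInterface
    //   public interface RecordsModifierOperator {
    //     RecordCollection apply(RecordCollection records);
    //   }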
- LOGGER.info("handle:: Start Handling kafka record: {}", consumerRecord); + LOGGER.info("handle:: Start Handling kafka record"); var result = mapToEvent(consumerRecord) .compose(this::createSnapshot) - .compose(event -> retrieveRecords(event, event.getTenant()) - .compose(recordCollection -> mapRecordFieldsChanges(event, recordCollection, userId)) - .compose(recordCollection -> recordService.saveRecordsBlocking(recordCollection, true, - toOkapiHeaders(consumerRecord.headers(), event.getTenant()))) - .map(recordsBatchResponse -> sendReports(recordsBatchResponse, event, consumerRecord.headers())) - .map(recordsBatchResponse -> mapRecordsToBibUpdateEvents(recordsBatchResponse, event)) - .compose(marcBibUpdates -> sendEvents(marcBibUpdates, event, consumerRecord)) - ).recover(th -> { - LOGGER.error("Failed to handle {} event", MARC_BIB.moduleTopicName(), th); - return Future.failedFuture(th); - } - ); -// var userId = extractHeaderValue(XOkapiHeaders.USER_ID, consumerRecord.headers()); -// -// var result = mapToEvent(consumerRecord) -// .compose(this::createSnapshot) -// .compose(linksUpdate -> { -// var instanceIds = linksUpdate.getUpdateTargets().stream() -// .flatMap(updateTarget -> updateTarget.getLinks().stream() -// .map(Link::getInstanceId)) -// .distinct() -// .toList(); -// var condition = RecordDaoUtil.getExternalIdsCondition(instanceIds, IdType.INSTANCE) -// .and(RecordDaoUtil.filterRecordByDeleted(false)); -// var okapiHeaders = toOkapiHeaders(consumerRecord.headers(), linksUpdate.getTenant()); -// Handler postUpdateHandler = recordsBatchResponse -> { -// sendReports(recordsBatchResponse, linksUpdate, consumerRecord.headers()); -// var marcBibUpdateStats = mapRecordsToBibUpdateEvents(recordsBatchResponse, linksUpdate); -// sendEvents(marcBibUpdateStats, linksUpdate, consumerRecord); -// }; -// Function> recordsModifier = recordsCollection -> -// this.mapRecordFieldsChanges(linksUpdate, recordsCollection, userId); -// -// return recordService.getAndUpdateRecordsBlocking(condition, RecordType.MARC_BIB, 0, instanceIds.size(), -// recordsModifier, okapiHeaders, postUpdateHandler); -// }) -// .map(batchResponse -> consumerRecord.key()); - - LOGGER.info("handle:: END Handling kafka record"); + .compose(linksUpdate -> { + var instanceIds = getBibRecordExternalIds(linksUpdate); + var okapiHeaders = toOkapiHeaders(consumerRecord.headers(), linksUpdate.getTenant()); + RecordsModifierOperator recordsModifier = recordsCollection -> + this.mapRecordFieldsChanges(linksUpdate, recordsCollection, userId); + + return recordService.saveRecordsByExternalIds(instanceIds, RecordType.MARC_BIB, recordsModifier, okapiHeaders) + .compose(recordsBatchResponse -> { + sendReports(recordsBatchResponse, linksUpdate, consumerRecord.headers()); + var marcBibUpdateStats = mapRecordsToBibUpdateEvents(recordsBatchResponse, linksUpdate); + return sendEvents(marcBibUpdateStats, linksUpdate, consumerRecord); + }); + }); + + LOGGER.info("handle:: Finish Handling kafka record"); return result; } @@ -146,98 +120,103 @@ private Future mapToEvent(KafkaConsumerRecord retrieveRecords(BibAuthorityLinksUpdate bibAuthorityLinksUpdate, String tenantId) { - LOGGER.info("Retrieving bibs for jobId {}, authorityId {}", - bibAuthorityLinksUpdate.getJobId(), bibAuthorityLinksUpdate.getAuthorityId()); - var instanceIds = bibAuthorityLinksUpdate.getUpdateTargets().stream() + private Future createSnapshot(BibAuthorityLinksUpdate bibAuthorityLinksUpdate) { + var now = new Date(); + var snapshot = new Snapshot() + 
.withJobExecutionId(bibAuthorityLinksUpdate.getJobId()) + .withStatus(Snapshot.Status.COMMITTED) + .withProcessingStartedDate(now) + .withMetadata(new Metadata() + .withCreatedDate(now) + .withUpdatedDate(now)); + + return snapshotService.saveSnapshot(snapshot, bibAuthorityLinksUpdate.getTenant()) + .map(result -> bibAuthorityLinksUpdate); + } + + private List getBibRecordExternalIds(BibAuthorityLinksUpdate linksUpdate) { + return linksUpdate.getUpdateTargets().stream() .flatMap(updateTarget -> updateTarget.getLinks().stream() .map(Link::getInstanceId)) .distinct() .toList(); - - var condition = RecordDaoUtil.getExternalIdsCondition(instanceIds, IdType.INSTANCE) - .and(RecordDaoUtil.filterRecordByDeleted(false)); - return recordService.getRecords(condition, RecordType.MARC_BIB, emptyList(), 0, instanceIds.size(), tenantId); } - private Future mapRecordFieldsChanges(BibAuthorityLinksUpdate bibAuthorityLinksUpdate, - RecordCollection recordCollection, String userId) { - LOGGER.info("Retrieved {} bib records for jobId {}, authorityId {}", + private RecordCollection mapRecordFieldsChanges(BibAuthorityLinksUpdate bibAuthorityLinksUpdate, + RecordCollection recordCollection, String userId) { + LOGGER.debug("Retrieved {} bib records for jobId {}, authorityId {}", recordCollection.getTotalRecords(), bibAuthorityLinksUpdate.getJobId(), bibAuthorityLinksUpdate.getAuthorityId()); - return getLinkProcessorForEvent(bibAuthorityLinksUpdate).map(linkProcessor -> { - recordCollection.getRecords().forEach(bibRecord -> { - var newRecordId = UUID.randomUUID().toString(); - var instanceId = bibRecord.getExternalIdsHolder().getInstanceId(); - var parsedRecord = bibRecord.getParsedRecord(); - var parsedRecordContent = readParsedContentToObjectRepresentation(bibRecord); - var fields = new LinkedList<>(parsedRecordContent.getDataFields()); - - var updateTargetFieldCodes = extractUpdateTargetFieldCodesForInstance(bibAuthorityLinksUpdate, instanceId); - var subfieldChanges = bibAuthorityLinksUpdate.getSubfieldsChanges().stream() - .filter(subfieldsChange -> updateTargetFieldCodes.contains(subfieldsChange.getField())) - .collect(Collectors.toMap(SubfieldsChange::getField, SubfieldsChange::getSubfields)); - - fields.forEach(field -> { - if (!updateTargetFieldCodes.contains(field.getTag())) { - return; - } - - var subfields = field.getSubfields(); - if (isEmpty(subfields)) { - return; - } - - var authorityId = getAuthorityIdSubfield(subfields); - if (authorityId.isEmpty() || !bibAuthorityLinksUpdate.getAuthorityId().equals(authorityId.get().getData())) { - return; - } - - var newSubfields = linkProcessor.process(field.getTag(), subfieldChanges.get(field.getTag()), subfields); - LOGGER.trace("JobId {}, AuthorityId {}, instanceId {}, field {}, old subfields: {}, new subfields: {}", - bibAuthorityLinksUpdate.getJobId(), bibAuthorityLinksUpdate.getAuthorityId(), - instanceId, field.getTag(), subfields, newSubfields); - - var newField = new DataFieldImpl(field.getTag(), field.getIndicator1(), field.getIndicator2()); - newSubfields.forEach(newField::addSubfield); - - var dataFields = parsedRecordContent.getDataFields(); - var fieldPosition = dataFields.indexOf(field); - dataFields.remove(fieldPosition); - dataFields.add(fieldPosition, newField); - }); - - parsedRecord.setContent(mapObjectRepresentationToParsedContentJsonString(parsedRecordContent)); - parsedRecord.setFormattedContent(EMPTY); - parsedRecord.setId(newRecordId); - bibRecord.setId(newRecordId); - bibRecord.getRawRecord().setId(newRecordId); - 
bibRecord.setSnapshotId(bibAuthorityLinksUpdate.getJobId()); - setUpdatedBy(bibRecord, userId); + var linkProcessor = getLinkProcessorForEvent(bibAuthorityLinksUpdate); + recordCollection.getRecords().forEach(bibRecord -> { + var newRecordId = UUID.randomUUID().toString(); + var instanceId = bibRecord.getExternalIdsHolder().getInstanceId(); + var parsedRecord = bibRecord.getParsedRecord(); + var parsedRecordContent = readParsedContentToObjectRepresentation(bibRecord); + var fields = new LinkedList<>(parsedRecordContent.getDataFields()); + + var updateTargetFieldCodes = extractUpdateTargetFieldCodesForInstance(bibAuthorityLinksUpdate, instanceId); + var subfieldChanges = bibAuthorityLinksUpdate.getSubfieldsChanges().stream() + .filter(subfieldsChange -> updateTargetFieldCodes.contains(subfieldsChange.getField())) + .collect(Collectors.toMap(SubfieldsChange::getField, SubfieldsChange::getSubfields)); + + fields.forEach(field -> { + if (!updateTargetFieldCodes.contains(field.getTag())) { + return; + } + + var subfields = field.getSubfields(); + if (isEmpty(subfields)) { + return; + } + + var authorityId = getAuthorityIdSubfield(subfields); + if (authorityId.isEmpty() || !bibAuthorityLinksUpdate.getAuthorityId().equals(authorityId.get().getData())) { + return; + } + + var newSubfields = linkProcessor.process(field.getTag(), subfieldChanges.get(field.getTag()), subfields); + LOGGER.trace("JobId {}, AuthorityId {}, instanceId {}, field {}, old subfields: {}, new subfields: {}", + bibAuthorityLinksUpdate.getJobId(), bibAuthorityLinksUpdate.getAuthorityId(), + instanceId, field.getTag(), subfields, newSubfields); + + var newField = new DataFieldImpl(field.getTag(), field.getIndicator1(), field.getIndicator2()); + newSubfields.forEach(newField::addSubfield); + + var dataFields = parsedRecordContent.getDataFields(); + var fieldPosition = dataFields.indexOf(field); + dataFields.remove(fieldPosition); + dataFields.add(fieldPosition, newField); }); - return recordCollection; + parsedRecord.setContent(mapObjectRepresentationToParsedContentJsonString(parsedRecordContent)); + parsedRecord.setFormattedContent(EMPTY); + parsedRecord.setId(newRecordId); + bibRecord.setId(newRecordId); + bibRecord.getRawRecord().setId(newRecordId); + bibRecord.setSnapshotId(bibAuthorityLinksUpdate.getJobId()); + setUpdatedBy(bibRecord, userId); }); + return recordCollection; } - private Future getLinkProcessorForEvent(BibAuthorityLinksUpdate bibAuthorityLinksUpdate) { + private LinkProcessor getLinkProcessorForEvent(BibAuthorityLinksUpdate bibAuthorityLinksUpdate) { var eventType = bibAuthorityLinksUpdate.getType(); switch (eventType) { case DELETE -> { LOGGER.debug("Precessing DELETE event for jobId {}, authorityId {}", bibAuthorityLinksUpdate.getJobId(), bibAuthorityLinksUpdate.getAuthorityId()); - return Future.succeededFuture(new DeleteLinkProcessor()); + return new DeleteLinkProcessor(); } case UPDATE -> { LOGGER.debug("Precessing UPDATE event for jobId {}, authorityId {}", bibAuthorityLinksUpdate.getJobId(), bibAuthorityLinksUpdate.getAuthorityId()); - return Future.succeededFuture(new UpdateLinkProcessor()); + return new UpdateLinkProcessor(); } - default -> { - return Future.failedFuture(new IllegalArgumentException( + default -> + throw new IllegalArgumentException( String.format("Unsupported event type: %s for jobId %s, authorityId %s", - eventType, bibAuthorityLinksUpdate.getJobId(), bibAuthorityLinksUpdate.getAuthorityId()))); - } + eventType, bibAuthorityLinksUpdate.getJobId(), 
bibAuthorityLinksUpdate.getAuthorityId())); } } @@ -258,7 +237,7 @@ private Optional getAuthorityIdSubfield(List subfields) { private List mapRecordsToBibUpdateEvents(RecordsBatchResponse batchResponse, BibAuthorityLinksUpdate event) { - LOGGER.info("Updated {} bibs for jobId {}, authorityId {}", + LOGGER.debug("Updated {} bibs for jobId {}, authorityId {}", batchResponse.getTotalRecords(), event.getJobId(), event.getAuthorityId()); var errors = batchResponse.getErrorMessages(); @@ -313,20 +292,6 @@ private List toFailedLinkUpdateReports(List errorRecor .toList(); } - private Future createSnapshot(BibAuthorityLinksUpdate bibAuthorityLinksUpdate) { - var now = new Date(); - var snapshot = new Snapshot() - .withJobExecutionId(bibAuthorityLinksUpdate.getJobId()) - .withStatus(Snapshot.Status.COMMITTED) - .withProcessingStartedDate(now) - .withMetadata(new Metadata() - .withCreatedDate(now) - .withUpdatedDate(now)); - - return snapshotService.saveSnapshot(snapshot, bibAuthorityLinksUpdate.getTenant()) - .map(result -> bibAuthorityLinksUpdate); - } - private void setUpdatedBy(Record changedRecord, String userId) { if (StringUtils.isNotBlank(userId)) { if (changedRecord.getMetadata() != null) { @@ -337,8 +302,8 @@ private void setUpdatedBy(Record changedRecord, String userId) { } } - private RecordsBatchResponse sendReports(RecordsBatchResponse batchResponse, BibAuthorityLinksUpdate event, - List headers) { + private void sendReports(RecordsBatchResponse batchResponse, BibAuthorityLinksUpdate event, + List headers) { var errorRecords = getErrorRecords(batchResponse); if (!errorRecords.isEmpty()) { LOGGER.info("Errors detected. Sending {} linking reports for jobId {}, authorityId {}", @@ -347,7 +312,6 @@ private RecordsBatchResponse sendReports(RecordsBatchResponse batchResponse, Bib toFailedLinkUpdateReports(errorRecords, event).forEach(report -> sendEventToKafka(LINKS_STATS, report.getTenant(), report.getJobId(), report, headers)); } - return batchResponse; } private Future sendEvents(List marcBibUpdateEvents, BibAuthorityLinksUpdate event, diff --git a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDao.java b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDao.java index 11975e992..b5860b610 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDao.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDao.java @@ -9,6 +9,7 @@ import java.util.Map; import java.util.Optional; import java.util.function.Function; + import net.sf.jsqlparser.JSQLParserException; import org.folio.dao.util.IdType; import org.folio.dao.util.MatchField; @@ -26,6 +27,7 @@ import org.folio.rest.jaxrs.model.StrippedParsedRecordCollection; import org.folio.rest.jooq.enums.RecordState; import org.folio.services.RecordSearchParameters; +import org.folio.services.entities.RecordsModifierOperator; import org.folio.services.util.TypeConnection; import org.folio.services.util.parser.ParseFieldsResult; import org.folio.services.util.parser.ParseLeaderResult; @@ -209,8 +211,6 @@ Future getMatchedRecordsIdentifiers(MatchField mat /** * Saves {@link RecordCollection} to the db. - * This is same as calling {@link RecordDao#saveRecordsBlocking(RecordCollection, boolean, Map)} specifying - * orderedBLocking = false. 
* * @param recordCollection Record collection to save * @param okapiHeaders okapi headers @@ -219,17 +219,18 @@ Future getMatchedRecordsIdentifiers(MatchField mat Future saveRecords(RecordCollection recordCollection, Map okapiHeaders); /** - * Saves {@link RecordCollection} to the db + * Saves {@link RecordCollection} to the db. * - * @param recordCollection Record collection to save + * @param externalIds external relation ids + * @param recordType record type + * @param recordsModifier records collection modifier operator * @param okapiHeaders okapi headers - * @param orderedBlocking boolean indicator to control if blocking logic needs to be run sequentially (true) or - * concurrently (false) by the Verticle. * @return future with saved {@link RecordsBatchResponse} */ - Future saveRecordsBlocking(RecordCollection recordCollection, - boolean orderedBlocking, - Map okapiHeaders); + Future saveRecordsByExternalIds(List externalIds, + RecordType recordType, + RecordsModifierOperator recordsModifier, + Map okapiHeaders); /** * Updates {{@link Record} in the db diff --git a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java index baa931589..aba859d5c 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java @@ -13,6 +13,7 @@ import static org.folio.dao.util.RecordDaoUtil.filterRecordByType; import static org.folio.dao.util.RecordDaoUtil.getExternalHrid; import static org.folio.dao.util.RecordDaoUtil.getExternalId; +import static org.folio.dao.util.RecordDaoUtil.getExternalIdType; import static org.folio.dao.util.SnapshotDaoUtil.SNAPSHOT_NOT_FOUND_TEMPLATE; import static org.folio.dao.util.SnapshotDaoUtil.SNAPSHOT_NOT_STARTED_MESSAGE_TEMPLATE; import static org.folio.rest.jooq.Tables.ERROR_RECORDS_LB; @@ -71,6 +72,7 @@ import net.sf.jsqlparser.expression.operators.conditional.AndExpression; import net.sf.jsqlparser.expression.operators.conditional.OrExpression; import net.sf.jsqlparser.parser.CCJSqlParserUtil; +import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang.ArrayUtils; import org.apache.commons.lang.text.StrSubstitutor; import org.apache.commons.lang3.StringUtils; @@ -115,15 +117,16 @@ import org.folio.rest.jooq.tables.records.SnapshotsLbRecord; import org.folio.services.RecordSearchParameters; import org.folio.services.domainevent.RecordDomainEventPublisher; +import org.folio.services.entities.RecordsModifierOperator; import org.folio.services.util.TypeConnection; import org.folio.services.util.parser.ParseFieldsResult; import org.folio.services.util.parser.ParseLeaderResult; import org.jooq.CommonTableExpression; import org.jooq.Condition; +import org.jooq.Configuration; import org.jooq.DSLContext; import org.jooq.Field; import org.jooq.JSONB; -import org.jooq.LoaderError; import org.jooq.Name; import org.jooq.OrderField; import org.jooq.Record1; @@ -259,32 +262,10 @@ public Future getRecords(Condition condition, RecordType recor } @Override - public Future getRecords(Condition condition, RecordType recordType, Collection> orderFields, int offset, int limit, boolean returnTotalCount, String tenantId) { - Name cte = name(CTE); - Name prt = name(recordType.getTableName()); - return getQueryExecutor(tenantId).transaction(txQE -> txQE.query(dsl -> { - ResultQuery> countQuery; - if (returnTotalCount) { - countQuery = 
dsl.selectCount() - .from(RECORDS_LB) - .where(condition.and(recordType.getRecordImplicitCondition())); - } else { - countQuery = select(inline(null, Integer.class).as(COUNT)); - } - - return dsl - .with(cte.as(countQuery)) - .select(getAllRecordFieldsWithCount(prt)) - .from(RECORDS_LB) - .leftJoin(table(prt)).on(RECORDS_LB.ID.eq(field(TABLE_FIELD_TEMPLATE, UUID.class, prt, name(ID)))) - .leftJoin(RAW_RECORDS_LB).on(RECORDS_LB.ID.eq(RAW_RECORDS_LB.ID)) - .leftJoin(ERROR_RECORDS_LB).on(RECORDS_LB.ID.eq(ERROR_RECORDS_LB.ID)) - .rightJoin(dsl.select().from(table(cte))).on(trueCondition()) - .where(condition.and(recordType.getRecordImplicitCondition())) - .orderBy(orderFields) - .offset(offset) - .limit(limit > 0 ? limit : DEFAULT_LIMIT_FOR_GET_RECORDS); - } + public Future getRecords(Condition condition, RecordType recordType, Collection> orderFields, + int offset, int limit, boolean returnTotalCount, String tenantId) { + return getQueryExecutor(tenantId).transaction(txQE -> txQE.query(dsl -> + readRecords(dsl, condition, recordType, offset, limit, returnTotalCount, orderFields) )).map(queryResult -> toRecordCollectionWithLimitCheck(queryResult, limit)); } @@ -757,237 +738,96 @@ public Future saveRecord(Record record, Map okapiHeaders } @Override - public Future saveRecord(ReactiveClassicGenericQueryExecutor txQE, Record record, + public Future saveRecord(ReactiveClassicGenericQueryExecutor txQE, Record sourceRecord, Map okapiHeaders) { - LOG.trace("saveRecord:: Saving {} record {}", record.getRecordType(), record.getId()); - return insertOrUpdateRecord(txQE, record) + LOG.trace("saveRecord:: Saving {} record {}", sourceRecord.getRecordType(), sourceRecord.getId()); + return insertOrUpdateRecord(txQE, sourceRecord) .onSuccess(created -> recordDomainEventPublisher.publishRecordCreated(created, okapiHeaders)); } @Override public Future saveRecords(RecordCollection recordCollection, Map okapiHeaders) { - return saveRecordsBlocking(recordCollection, false, okapiHeaders); - } - - @Override - public Future saveRecordsBlocking(RecordCollection recordCollection, - boolean orderedBlocking, - Map okapiHeaders) { var tenantId = okapiHeaders.get(OKAPI_TENANT_HEADER); logRecordCollection("saveRecords:: Saving", recordCollection, tenantId); + var firstRecord = recordCollection.getRecords().iterator().next(); + var snapshotId = firstRecord.getSnapshotId(); + var recordType = RecordType.valueOf(firstRecord.getRecordType().name()); Promise finalPromise = Promise.promise(); Context context = Vertx.currentContext(); - if(context == null) return Future.failedFuture("saveRecords must be executed by a Vertx thread"); - context.owner().executeBlocking(promise -> { - Set matchedIds = new HashSet<>(); - Set snapshotIds = new HashSet<>(); - Set recordTypes = new HashSet<>(); - - List dbRecords = new ArrayList<>(); - List dbRawRecords = new ArrayList<>(); - List> dbParsedRecords = new ArrayList<>(); - List dbErrorRecords = new ArrayList<>(); - List errorMessages = new ArrayList<>(); - - recordCollection.getRecords() - .stream() - .map(RecordDaoUtil::ensureRecordHasId) - .map(RecordDaoUtil::ensureRecordHasMatchedId) - .map(RecordDaoUtil::ensureRecordHasSuppressDiscovery) - .map(RecordDaoUtil::ensureRecordForeignKeys) - .forEach(record -> { - // collect unique matched ids to query to determine generation - matchedIds.add(UUID.fromString(record.getMatchedId())); - - // make sure only one snapshot id - snapshotIds.add(record.getSnapshotId()); - if (snapshotIds.size() > 1) { - throw new BadRequestException("Batch record 
collection only supports single snapshot"); - } - - if(Objects.nonNull(record.getRecordType())) { - recordTypes.add(record.getRecordType().name()); - } else { - throw new BadRequestException(StringUtils.defaultIfEmpty(record.getErrorRecord().getDescription(), String.format("Record with id %s has not record type", record.getId()))); - } + if(context == null) { + return Future.failedFuture("saveRecords must be executed by a Vertx thread"); + } - // make sure only one record type - if (recordTypes.size() > 1) { - throw new BadRequestException("Batch record collection only supports single record type"); - } + context.owner().executeBlocking( + () -> saveRecords(recordCollection, snapshotId, recordType, tenantId), + false, + r -> { + if (r.failed()) { + LOG.warn("saveRecords:: Error during batch record save", r.cause()); + finalPromise.fail(r.cause()); + } else { + LOG.debug("saveRecords:: batch record save was successful"); + finalPromise.complete(r.result()); + } + }); - // if record has parsed record, validate by attempting format - if (Objects.nonNull(record.getParsedRecord())) { - try { - RecordType recordType = toRecordType(record.getRecordType().name()); - recordType.formatRecord(record); - Record2 dbParsedRecord = recordType.toDatabaseRecord2(record.getParsedRecord()); - dbParsedRecords.add(dbParsedRecord); - } catch (Exception e) { - // create error record and remove from record - Object content = Objects.nonNull(record.getParsedRecord()) - ? record.getParsedRecord().getContent() - : null; - ErrorRecord errorRecord = new ErrorRecord() - .withId(record.getId()) - .withDescription(e.getMessage()) - .withContent(content); - errorMessages.add(format(INVALID_PARSED_RECORD_MESSAGE_TEMPLATE, record.getId(), e.getMessage())); - record.withErrorRecord(errorRecord) - .withParsedRecord(null) - .withLeaderRecordStatus(null); - } - } - if (Objects.nonNull(record.getRawRecord())) { - dbRawRecords.add(RawRecordDaoUtil.toDatabaseRawRecord(record.getRawRecord())); - } - if (Objects.nonNull(record.getErrorRecord())) { - dbErrorRecords.add(ErrorRecordDaoUtil.toDatabaseErrorRecord(record.getErrorRecord())); - } - dbRecords.add(RecordDaoUtil.toDatabaseRecord(record)); - }); + return finalPromise.future() + .onSuccess(response -> response.getRecords() + .forEach(r -> recordDomainEventPublisher.publishRecordCreated(r, okapiHeaders)) + ); + } - UUID snapshotId = UUID.fromString(snapshotIds.stream().findFirst().orElseThrow()); + @Override + public Future saveRecordsByExternalIds(List externalIds, + RecordType recordType, + RecordsModifierOperator recordsModifier, + Map okapiHeaders) { + var condition = RecordDaoUtil.getExternalIdsCondition(externalIds, + getExternalIdType(Record.RecordType.fromValue(recordType.name()))) + .and(RecordDaoUtil.filterRecordByDeleted(false)); - RecordType recordType = toRecordType(recordTypes.stream().findFirst().orElseThrow()); + var tenantId = okapiHeaders.get(OKAPI_TENANT_HEADER); + Promise finalPromise = Promise.promise(); + Context context = Vertx.currentContext(); + if(context == null) { + return Future.failedFuture("saveRecordsByExternalIds:: operation must be executed by a Vertx thread"); + } + context.owner().executeBlocking( + () -> { + final RecordCollection recordCollection; try (Connection connection = getConnection(tenantId)) { - DSL.using(connection).transaction(ctx -> { + recordCollection = DSL.using(connection).transactionResult(ctx -> { DSLContext dsl = DSL.using(ctx); - - // validate snapshot - Optional snapshot = DSL.using(ctx).selectFrom(SNAPSHOTS_LB) - 
.where(SNAPSHOTS_LB.ID.eq(snapshotId)) - .fetchOptional(); - if (snapshot.isPresent()) { - if (Objects.isNull(snapshot.get().getProcessingStartedDate())) { - throw new BadRequestException(format(SNAPSHOT_NOT_STARTED_MESSAGE_TEMPLATE, snapshot.get().getStatus())); - } - } else { - throw new NotFoundException(format(SNAPSHOT_NOT_FOUND_TEMPLATE, snapshotId)); - } - - List ids = new ArrayList<>(); - Map matchedGenerations = new HashMap<>(); - - // lookup latest generation by matched id and committed snapshot updated before current snapshot - dsl.select(RECORDS_LB.MATCHED_ID, RECORDS_LB.ID, RECORDS_LB.GENERATION) - .distinctOn(RECORDS_LB.MATCHED_ID) - .from(RECORDS_LB) - .innerJoin(SNAPSHOTS_LB).on(RECORDS_LB.SNAPSHOT_ID.eq(SNAPSHOTS_LB.ID)) - .where(RECORDS_LB.MATCHED_ID.in(matchedIds) - .and(SNAPSHOTS_LB.STATUS.in(JobExecutionStatus.COMMITTED, JobExecutionStatus.ERROR, JobExecutionStatus.CANCELLED)) - .and(SNAPSHOTS_LB.UPDATED_DATE.lessThan(dsl - .select(SNAPSHOTS_LB.PROCESSING_STARTED_DATE) - .from(SNAPSHOTS_LB) - .where(SNAPSHOTS_LB.ID.eq(snapshotId))))) - .orderBy(RECORDS_LB.MATCHED_ID.asc(), RECORDS_LB.GENERATION.desc()) - .fetchStream().forEach(r -> { - UUID id = r.get(RECORDS_LB.ID); - UUID matchedId = r.get(RECORDS_LB.MATCHED_ID); - int generation = r.get(RECORDS_LB.GENERATION); - ids.add(id); - matchedGenerations.put(matchedId, generation); - }); - - LOG.info("saveRecords:: matched ids: {}", matchedIds); - LOG.info("saveRecords:: records ids: {}", ids); - LOG.info("saveRecords:: generations: {}", matchedGenerations); - - // update matching records state - if(!ids.isEmpty()) - { - dsl.update(RECORDS_LB) - .set(RECORDS_LB.STATE, RecordState.OLD) - .where(RECORDS_LB.ID.in(ids)) - .execute(); - } - - // batch insert records updating generation if required - List recordsLoadingErrors = dsl.loadInto(RECORDS_LB) - .batchAfter(1000) - .bulkAfter(500) - .commitAfter(1000) - .onErrorAbort() - .loadRecords(dbRecords.stream() - .map(record -> { - Integer generation = matchedGenerations.get(record.getMatchedId()); - if (Objects.nonNull(generation)) { - record.setGeneration(generation + 1); - } else if (Objects.isNull(record.getGeneration())) { - record.setGeneration(0); - } - return record; - }) - .toList()) - .fieldsCorresponding() - .execute() - .errors(); - - recordsLoadingErrors.forEach(error -> { - if (error.exception().sqlState().equals(UNIQUE_VIOLATION_SQL_STATE)) { - throw new DuplicateEventException("SQL Unique constraint violation prevented repeatedly saving the record"); - } - LOG.warn("saveRecords:: Error occurred on batch execution: {}", error.exception().getCause().getMessage()); - LOG.debug("saveRecords:: Failed to execute statement from batch: {}", error.query()); - }); - - // batch insert raw records - dsl.loadInto(RAW_RECORDS_LB) - .batchAfter(250) - .commitAfter(1000) - .onDuplicateKeyUpdate() - .onErrorAbort() - .loadRecords(dbRawRecords) - .fieldsCorresponding() - .execute(); - - // batch insert parsed records - recordType.toLoaderOptionsStep(dsl) - .batchAfter(250) - .commitAfter(1000) - .onDuplicateKeyUpdate() - .onErrorAbort() - .loadRecords(dbParsedRecords) - .fieldsCorresponding() - .execute(); - - if (!dbErrorRecords.isEmpty()) { - // batch insert error records - dsl.loadInto(ERROR_RECORDS_LB) - .batchAfter(250) - .commitAfter(1000) - .onDuplicateKeyUpdate() - .onErrorAbort() - .loadRecords(dbErrorRecords) - .fieldsCorresponding() - .execute(); - } - - promise.complete(new RecordsBatchResponse() - .withRecords(recordCollection.getRecords()) - 
.withTotalRecords(recordCollection.getRecords().size()) - .withErrorMessages(errorMessages)); + var queryResult = readRecords(dsl, condition, recordType, 0, externalIds.size(), false, emptyList()); + var records = queryResult.fetch(this::toRecord); + return new RecordCollection().withRecords(records).withTotalRecords(records.size()); }); - } catch (DuplicateEventException e) { - LOG.info("saveRecords:: Skipped saving records due to duplicate event: {}", e.getMessage()); - promise.fail(e); } catch (SQLException | DataAccessException e) { - LOG.warn("saveRecords:: Failed to save records", e); - promise.fail(e.getCause()); + LOG.warn("saveRecordsByExternalIds:: Failed to read records", e); + throw e; + } + + if (recordCollection == null || CollectionUtils.isEmpty(recordCollection.getRecords())) { + LOG.warn("saveRecordsByExternalIds:: No records returned from the fetch query"); + return new RecordsBatchResponse().withTotalRecords(0); } + + var modifiedRecords = recordsModifier.apply(recordCollection); + var snapshotId = modifiedRecords.getRecords().iterator().next().getSnapshotId(); + return saveRecords(modifiedRecords, snapshotId, recordType, tenantId); }, - orderedBlocking, r -> { if (r.failed()) { - LOG.warn("saveRecords:: Error during batch record save", r.cause()); + LOG.warn("saveRecordsByExternalIds:: Error during batch record save", r.cause()); finalPromise.fail(r.cause()); } else { - LOG.debug("saveRecords:: batch record save was successful"); + LOG.debug("saveRecordsByExternalIds:: batch record save was successful"); finalPromise.complete(r.result()); } - }); + } + ); return finalPromise.future() .onSuccess(response -> response.getRecords() @@ -995,6 +835,225 @@ public Future saveRecordsBlocking(RecordCollection recordC ); } + private ResultQuery readRecords(DSLContext dsl, Condition condition, RecordType recordType, int offset, int limit, + boolean returnTotalCount, Collection> orderFields) { + Name cte = name(CTE); + Name prt = name(recordType.getTableName()); + var finalCondition = condition.and(recordType.getRecordImplicitCondition()); + + ResultQuery> countQuery; + if (returnTotalCount) { + countQuery = dsl.selectCount() + .from(RECORDS_LB) + .where(finalCondition); + } else { + countQuery = select(inline(null, Integer.class).as(COUNT)); + } + + return dsl + .with(cte.as(countQuery)) + .select(getAllRecordFieldsWithCount(prt)) + .from(RECORDS_LB) + .leftJoin(table(prt)).on(RECORDS_LB.ID.eq(field(TABLE_FIELD_TEMPLATE, UUID.class, prt, name(ID)))) + .leftJoin(RAW_RECORDS_LB).on(RECORDS_LB.ID.eq(RAW_RECORDS_LB.ID)) + .leftJoin(ERROR_RECORDS_LB).on(RECORDS_LB.ID.eq(ERROR_RECORDS_LB.ID)) + .rightJoin(dsl.select().from(table(cte))).on(trueCondition()) + .where(finalCondition) + .orderBy(orderFields) + .offset(offset) + .limit(limit > 0 ? 
limit : DEFAULT_LIMIT_FOR_GET_RECORDS); + } + + private RecordsBatchResponse saveRecords(RecordCollection recordCollection, String snapshotId, RecordType recordType, + String tenantId) throws SQLException { + Set matchedIds = new HashSet<>(); + List dbRecords = new ArrayList<>(); + List dbRawRecords = new ArrayList<>(); + List> dbParsedRecords = new ArrayList<>(); + List dbErrorRecords = new ArrayList<>(); + + List errorMessages = new ArrayList<>(); + + recordCollection.getRecords() + .stream() + .map(RecordDaoUtil::ensureRecordHasId) + .map(RecordDaoUtil::ensureRecordHasMatchedId) + .map(RecordDaoUtil::ensureRecordHasSuppressDiscovery) + .map(RecordDaoUtil::ensureRecordForeignKeys) + .forEach(record -> { + // collect unique matched ids to query to determine generation + matchedIds.add(UUID.fromString(record.getMatchedId())); + + // make sure only one snapshot id + if (!Objects.equals(snapshotId, record.getSnapshotId())) { + throw new BadRequestException("Batch record collection only supports single snapshot"); + } + validateRecordType(record, recordType); + + // if record has parsed record, validate by attempting format + if (Objects.nonNull(record.getParsedRecord())) { + try { + recordType.formatRecord(record); + Record2 dbParsedRecord = recordType.toDatabaseRecord2(record.getParsedRecord()); + dbParsedRecords.add(dbParsedRecord); + } catch (Exception e) { + // create error record and remove from record + var errorRecord = new ErrorRecord() + .withId(record.getId()) + .withDescription(e.getMessage()) + .withContent(record.getParsedRecord().getContent()); + errorMessages.add(format(INVALID_PARSED_RECORD_MESSAGE_TEMPLATE, record.getId(), e.getMessage())); + record.withErrorRecord(errorRecord) + .withParsedRecord(null) + .withLeaderRecordStatus(null); + } + } + if (Objects.nonNull(record.getRawRecord())) { + dbRawRecords.add(RawRecordDaoUtil.toDatabaseRawRecord(record.getRawRecord())); + } + if (Objects.nonNull(record.getErrorRecord())) { + dbErrorRecords.add(ErrorRecordDaoUtil.toDatabaseErrorRecord(record.getErrorRecord())); + } + dbRecords.add(RecordDaoUtil.toDatabaseRecord(record)); + }); + + try (Connection connection = getConnection(tenantId)) { + return DSL.using(connection).transactionResult(ctx -> { + DSLContext dsl = DSL.using(ctx); + + // validate snapshot + validateSnapshot(UUID.fromString(snapshotId), ctx); + + List ids = new ArrayList<>(); + Map matchedGenerations = new HashMap<>(); + + // lookup the latest generation by matched id and committed snapshot updated before current snapshot + dsl.select(RECORDS_LB.MATCHED_ID, RECORDS_LB.ID, RECORDS_LB.GENERATION) + .distinctOn(RECORDS_LB.MATCHED_ID) + .from(RECORDS_LB) + .innerJoin(SNAPSHOTS_LB).on(RECORDS_LB.SNAPSHOT_ID.eq(SNAPSHOTS_LB.ID)) + .where(RECORDS_LB.MATCHED_ID.in(matchedIds) + .and(SNAPSHOTS_LB.STATUS.in(JobExecutionStatus.COMMITTED, JobExecutionStatus.ERROR, JobExecutionStatus.CANCELLED)) + .and(SNAPSHOTS_LB.UPDATED_DATE.lessThan(dsl + .select(SNAPSHOTS_LB.PROCESSING_STARTED_DATE) + .from(SNAPSHOTS_LB) + .where(SNAPSHOTS_LB.ID.eq(UUID.fromString(snapshotId)))))) + .orderBy(RECORDS_LB.MATCHED_ID.asc(), RECORDS_LB.GENERATION.desc()) + .fetchStream().forEach(r -> { + UUID id = r.get(RECORDS_LB.ID); + UUID matchedId = r.get(RECORDS_LB.MATCHED_ID); + int generation = r.get(RECORDS_LB.GENERATION); + ids.add(id); + matchedGenerations.put(matchedId, generation); + }); + + // update matching records state + if(!ids.isEmpty()) + { + dsl.update(RECORDS_LB) + .set(RECORDS_LB.STATE, RecordState.OLD) + .where(RECORDS_LB.ID.in(ids)) + 
.execute(); + } + + // batch insert records updating generation if required + var recordsLoadingErrors = dsl.loadInto(RECORDS_LB) + .batchAfter(1000) + .bulkAfter(500) + .commitAfter(1000) + .onErrorAbort() + .loadRecords(dbRecords.stream() + .map(record -> { + Integer generation = matchedGenerations.get(record.getMatchedId()); + if (Objects.nonNull(generation)) { + record.setGeneration(generation + 1); + } else if (Objects.isNull(record.getGeneration())) { + record.setGeneration(0); + } + return record; + }) + .toList()) + .fieldsCorresponding() + .execute() + .errors(); + + recordsLoadingErrors.forEach(error -> { + if (error.exception().sqlState().equals(UNIQUE_VIOLATION_SQL_STATE)) { + throw new DuplicateEventException("SQL Unique constraint violation prevented repeatedly saving the record"); + } + LOG.warn("saveRecords:: Error occurred on batch execution: {}", error.exception().getCause().getMessage()); + LOG.debug("saveRecords:: Failed to execute statement from batch: {}", error.query()); + }); + + // batch insert raw records + dsl.loadInto(RAW_RECORDS_LB) + .batchAfter(250) + .commitAfter(1000) + .onDuplicateKeyUpdate() + .onErrorAbort() + .loadRecords(dbRawRecords) + .fieldsCorresponding() + .execute(); + + // batch insert parsed records + recordType.toLoaderOptionsStep(dsl) + .batchAfter(250) + .commitAfter(1000) + .onDuplicateKeyUpdate() + .onErrorAbort() + .loadRecords(dbParsedRecords) + .fieldsCorresponding() + .execute(); + + if (!dbErrorRecords.isEmpty()) { + // batch insert error records + dsl.loadInto(ERROR_RECORDS_LB) + .batchAfter(250) + .commitAfter(1000) + .onDuplicateKeyUpdate() + .onErrorAbort() + .loadRecords(dbErrorRecords) + .fieldsCorresponding() + .execute(); + } + + return new RecordsBatchResponse() + .withRecords(recordCollection.getRecords()) + .withTotalRecords(recordCollection.getRecords().size()) + .withErrorMessages(errorMessages); + }); + } catch (DuplicateEventException e) { + LOG.info("saveRecords:: Skipped saving records due to duplicate event: {}", e.getMessage()); + throw e; + } catch (SQLException | DataAccessException e) { + LOG.warn("saveRecords:: Failed to save records", e); + throw e; + } + } + + private void validateRecordType(Record record, RecordType recordType) { + if (record.getRecordType() == null) { + var error = record.getErrorRecord() != null ? 
record.getErrorRecord().getDescription() : ""; + throw new BadRequestException( + StringUtils.defaultIfEmpty(error, String.format("Record with id %s has not record type", record.getId()))); + } + + if (RecordType.valueOf(record.getRecordType().name()) != recordType) { + throw new BadRequestException("Batch record collection only supports single record type"); + } + } + + private void validateSnapshot(UUID snapshotId, Configuration ctx) { + Optional snapshot = DSL.using(ctx).selectFrom(SNAPSHOTS_LB) + .where(SNAPSHOTS_LB.ID.eq(snapshotId)) + .fetchOptional(); + if (snapshot.isPresent() && Objects.isNull(snapshot.get().getProcessingStartedDate())) { + throw new BadRequestException(format(SNAPSHOT_NOT_STARTED_MESSAGE_TEMPLATE, snapshot.get().getStatus())); + } else if (snapshot.isEmpty()) { + throw new NotFoundException(format(SNAPSHOT_NOT_FOUND_TEMPLATE, snapshotId)); + } + } + @Override public Future updateRecord(Record record, Map okapiHeaders) { var tenantId = okapiHeaders.get(OKAPI_TENANT_HEADER); @@ -1612,11 +1671,11 @@ private Future updateExternalIdsForRecord(ReactiveClassicGenericQueryEx }); } - private Record validateParsedRecordId(Record record) { - if (Objects.isNull(record.getParsedRecord()) || StringUtils.isEmpty(record.getParsedRecord().getId())) { + private Record validateParsedRecordId(Record recordDto) { + if (Objects.isNull(recordDto.getParsedRecord()) || StringUtils.isEmpty(recordDto.getParsedRecord().getId())) { throw new BadRequestException("Each parsed record should contain an id"); } - return record; + return recordDto; } private Field[] getRecordFields(Name prt) { @@ -1716,20 +1775,38 @@ private SourceRecord toSourceRecord(Row row) { } private Record toRecord(Row row) { - Record record = RecordDaoUtil.toRecord(row); + Record recordDto = RecordDaoUtil.toRecord(row); RawRecord rawRecord = RawRecordDaoUtil.toJoinedRawRecord(row); if (Objects.nonNull(rawRecord.getContent())) { - record.setRawRecord(rawRecord); + recordDto.setRawRecord(rawRecord); } ParsedRecord parsedRecord = ParsedRecordDaoUtil.toJoinedParsedRecord(row); if (Objects.nonNull(parsedRecord.getContent())) { - record.setParsedRecord(parsedRecord); + recordDto.setParsedRecord(parsedRecord); } ErrorRecord errorRecord = ErrorRecordDaoUtil.toJoinedErrorRecord(row); if (Objects.nonNull(errorRecord.getContent())) { - record.setErrorRecord(errorRecord); + recordDto.setErrorRecord(errorRecord); + } + return recordDto; + } + + private Record toRecord(org.jooq.Record dbRecord) { + Record recordDto = RecordDaoUtil.toRecord(dbRecord); + RawRecord rawRecord = RawRecordDaoUtil.toJoinedRawRecord(dbRecord); + if (Objects.nonNull(rawRecord.getContent())) { + recordDto.setRawRecord(rawRecord); + } + + ParsedRecord parsedRecord = ParsedRecordDaoUtil.toJoinedParsedRecord(dbRecord); + if (Objects.nonNull(parsedRecord.getContent())) { + recordDto.setParsedRecord(parsedRecord); + } + ErrorRecord errorRecord = ErrorRecordDaoUtil.toJoinedErrorRecord(dbRecord); + if (Objects.nonNull(errorRecord.getContent())) { + recordDto.setErrorRecord(errorRecord); } - return record; + return recordDto; } private StrippedParsedRecord toStrippedParsedRecord(Row row) { diff --git a/mod-source-record-storage-server/src/main/java/org/folio/dao/util/ErrorRecordDaoUtil.java b/mod-source-record-storage-server/src/main/java/org/folio/dao/util/ErrorRecordDaoUtil.java index ff10bc84c..6e76924ed 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/dao/util/ErrorRecordDaoUtil.java +++ 
b/mod-source-record-storage-server/src/main/java/org/folio/dao/util/ErrorRecordDaoUtil.java @@ -17,6 +17,7 @@ import io.vertx.core.json.JsonObject; import io.vertx.sqlclient.Row; import io.vertx.sqlclient.RowSet; +import org.jooq.Record; /** * Utility class for managing {@link ErrorRecord} @@ -32,7 +33,7 @@ private ErrorRecordDaoUtil() { } /** * Searches for {@link ErrorRecord} by id using {@link ReactiveClassicGenericQueryExecutor} - * + * * @param queryExecutor query executor * @param id id * @return future with optional ErrorRecord @@ -45,7 +46,7 @@ public static Future> findById(ReactiveClassicGenericQuery /** * Saves {@link ErrorRecord} to the db using {@link ReactiveClassicGenericQueryExecutor} - * + * * @param queryExecutor query executor * @param errorRecord error record * @return future with updated ErrorRecord @@ -62,7 +63,7 @@ public static Future save(ReactiveClassicGenericQueryExecutor query /** * Convert database query result {@link Row} to {@link ErrorRecord} - * + * * @param row query result row * @return ErrorRecord */ @@ -76,7 +77,7 @@ public static ErrorRecord toErrorRecord(Row row) { /** * Convert database query result {@link Row} to {@link ErrorRecord} - * + * * @param row query result row * @return ErrorRecord */ @@ -91,9 +92,26 @@ public static ErrorRecord toJoinedErrorRecord(Row row) { .withDescription(row.getString(DESCRIPTION)); } + /** + * Convert database query result {@link Row} to {@link ErrorRecord} + * + * @param dbRecord query result record + * @return ErrorRecord + */ + public static ErrorRecord toJoinedErrorRecord(Record dbRecord) { + ErrorRecord errorRecord = new ErrorRecord(); + UUID id = dbRecord.get(org.folio.rest.jooq.tables.ErrorRecordsLb.ERROR_RECORDS_LB.ID); + if (Objects.nonNull(id)) { + errorRecord.withId(id.toString()); + } + return errorRecord + .withContent(dbRecord.get(ERROR_RECORD_CONTENT, String.class)) + .withDescription(dbRecord.get(org.folio.rest.jooq.tables.ErrorRecordsLb.ERROR_RECORDS_LB.DESCRIPTION)); + } + /** * Convert database query result {@link Row} to {@link Optional} {@link ErrorRecord} - * + * * @param row query result row * @return optional ErrorRecord */ @@ -103,7 +121,7 @@ public static Optional toOptionalErrorRecord(Row row) { /** * Convert {@link ErrorRecord} to database record {@link ErrorRecordsLbRecord} - * + * * @param errorRecord error record * @return ErrorRecordsLbRecord */ diff --git a/mod-source-record-storage-server/src/main/java/org/folio/dao/util/ParsedRecordDaoUtil.java b/mod-source-record-storage-server/src/main/java/org/folio/dao/util/ParsedRecordDaoUtil.java index 4f8979624..767c9fa7d 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/dao/util/ParsedRecordDaoUtil.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/dao/util/ParsedRecordDaoUtil.java @@ -138,16 +138,23 @@ public static ParsedRecord toParsedRecord(Row row) { * @return ParsedRecord */ public static ParsedRecord toJoinedParsedRecord(Row row) { - ParsedRecord parsedRecord = new ParsedRecord(); UUID id = row.getUUID(ID); - if (Objects.nonNull(id)) { - parsedRecord.withId(id.toString()); - } Object content = row.getValue(PARSED_RECORD_CONTENT); - if (Objects.nonNull(content)) { - parsedRecord.withContent(normalize(content).getMap()); - } - return parsedRecord; + + return asParsedRecord(id, content); + } + + /** + * Convert database query result {@link org.jooq.Record} to {@link ParsedRecord} + * + * @param dbRecord query result record + * @return ParsedRecord + */ + public static ParsedRecord 
toJoinedParsedRecord(org.jooq.Record dbRecord) { + UUID id = dbRecord.get(ID_FIELD); + Object content = dbRecord.get(PARSED_RECORD_CONTENT, String.class); + + return asParsedRecord(id, content); } /** @@ -258,4 +265,15 @@ public static JsonObject normalize(Object content) { ? new JsonObject((String) content) : JsonObject.mapFrom(content); } + + private static ParsedRecord asParsedRecord(UUID id, Object content) { + ParsedRecord parsedRecord = new ParsedRecord(); + if (Objects.nonNull(id)) { + parsedRecord.withId(id.toString()); + } + if (Objects.nonNull(content)) { + parsedRecord.withContent(normalize(content).getMap()); + } + return parsedRecord; + } } diff --git a/mod-source-record-storage-server/src/main/java/org/folio/dao/util/RawRecordDaoUtil.java b/mod-source-record-storage-server/src/main/java/org/folio/dao/util/RawRecordDaoUtil.java index 0de3850e1..d41851fbf 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/dao/util/RawRecordDaoUtil.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/dao/util/RawRecordDaoUtil.java @@ -16,6 +16,7 @@ import io.vertx.core.Future; import io.vertx.sqlclient.Row; import io.vertx.sqlclient.RowSet; +import org.jooq.Record; /** * Utility class for managing {@link RawRecord} @@ -30,7 +31,7 @@ private RawRecordDaoUtil() { } /** * Searches for {@link RawRecord} by id using {@link ReactiveClassicGenericQueryExecutor} - * + * * @param queryExecutor query executor * @param id id * @return future with optional RawRecord @@ -43,7 +44,7 @@ public static Future> findById(ReactiveClassicGenericQueryEx /** * Saves {@link RawRecord} to the db using {@link ReactiveClassicGenericQueryExecutor} - * + * * @param queryExecutor query executor * @param rawRecord raw record * @return future with updated RawRecord @@ -60,7 +61,7 @@ public static Future save(ReactiveClassicGenericQueryExecutor queryEx /** * Convert database query result {@link Row} to {@link RawRecord} - * + * * @param row query result row * @return RawRecord */ @@ -73,7 +74,7 @@ public static RawRecord toRawRecord(Row row) { /** * Convert database query result {@link Row} to {@link RawRecord} - * + * * @param row query result row * @return RawRecord */ @@ -87,9 +88,25 @@ public static RawRecord toJoinedRawRecord(Row row) { .withContent(row.getString(RAW_RECORD_CONTENT)); } + /** + * Convert database query result {@link Record} to {@link RawRecord} + * + * @param dbRecord query result record + * @return RawRecord + */ + public static RawRecord toJoinedRawRecord(Record dbRecord) { + RawRecord rawRecord = new RawRecord(); + UUID id = dbRecord.get(org.folio.rest.jooq.tables.RawRecordsLb.RAW_RECORDS_LB.ID); + if (Objects.nonNull(id)) { + rawRecord.withId(id.toString()); + } + return rawRecord + .withContent(dbRecord.get(RAW_RECORD_CONTENT, String.class)); + } + /** * Convert database query result {@link Row} to {@link Optional} {@link RawRecord} - * + * * @param row query result row * @return optional RawRecord */ @@ -99,7 +116,7 @@ public static Optional toOptionalRawRecord(Row row) { /** * Convert {@link RawRecord} to database record {@link RawRecordsLbRecord} - * + * * @param rawRecord raw record * @return RawRecordsLbRecord */ diff --git a/mod-source-record-storage-server/src/main/java/org/folio/dao/util/RecordDaoUtil.java b/mod-source-record-storage-server/src/main/java/org/folio/dao/util/RecordDaoUtil.java index 6271de982..81b90611e 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/dao/util/RecordDaoUtil.java +++ 
b/mod-source-record-storage-server/src/main/java/org/folio/dao/util/RecordDaoUtil.java @@ -291,31 +291,46 @@ public static SourceRecord toSourceRecord(Record record) { */ public static Record toRecord(Row row) { RecordsLb pojo = RowMappers.getRecordsLbMapper().apply(row); - Record record = new Record(); + return asRecord(pojo); + } + + /** + * Convert database query result {@link org.jooq.Record} to {@link Record} + * + * @param dbRecord query result record + * @return Record + */ + public static Record toRecord(org.jooq.Record dbRecord) { + RecordsLb pojo = RecordMappers.getDbRecordToRecordsLbMapper().apply(dbRecord); + return asRecord(pojo); + } + + private static Record asRecord(RecordsLb pojo) { + Record recordDto = new Record(); if (Objects.nonNull(pojo.getId())) { - record.withId(pojo.getId().toString()); + recordDto.withId(pojo.getId().toString()); } if (Objects.nonNull(pojo.getSnapshotId())) { - record.withSnapshotId(pojo.getSnapshotId().toString()); + recordDto.withSnapshotId(pojo.getSnapshotId().toString()); } if (Objects.nonNull(pojo.getMatchedId())) { - record.withMatchedId(pojo.getMatchedId().toString()); + recordDto.withMatchedId(pojo.getMatchedId().toString()); } if (Objects.nonNull(pojo.getRecordType())) { - record.withRecordType(Record.RecordType.valueOf(pojo.getRecordType().toString())); + recordDto.withRecordType(Record.RecordType.valueOf(pojo.getRecordType().toString())); } if (Objects.nonNull(pojo.getState())) { - record.withState(State.valueOf(pojo.getState().toString())); + recordDto.withState(State.valueOf(pojo.getState().toString())); } - record + recordDto .withOrder(pojo.getOrder()) .withGeneration(pojo.getGeneration()) .withLeaderRecordStatus(pojo.getLeaderRecordStatus()) - .withDeleted(record.getState().equals(State.DELETED) - || DELETED_LEADER_RECORD_STATUS.contains(record.getLeaderRecordStatus())); + .withDeleted(recordDto.getState().equals(State.DELETED) + || DELETED_LEADER_RECORD_STATUS.contains(recordDto.getLeaderRecordStatus())); - return record + return recordDto .withAdditionalInfo(toAdditionalInfo(pojo)) .withExternalIdsHolder(toExternalIdsHolder(pojo)) .withMetadata(toMetadata(pojo)); @@ -691,7 +706,7 @@ private static UUID toUUID(String uuid) { } private static List toUUIDs(List uuids) { - return uuids.stream().map(RecordDaoUtil::toUUID).collect(Collectors.toList()); + return uuids.stream().map(RecordDaoUtil::toUUID).toList(); } private static RecordType toRecordType(String type) { diff --git a/mod-source-record-storage-server/src/main/java/org/folio/dao/util/RecordMappers.java b/mod-source-record-storage-server/src/main/java/org/folio/dao/util/RecordMappers.java new file mode 100644 index 000000000..0cb50bc42 --- /dev/null +++ b/mod-source-record-storage-server/src/main/java/org/folio/dao/util/RecordMappers.java @@ -0,0 +1,39 @@ +package org.folio.dao.util; + +import org.folio.rest.jooq.tables.pojos.RecordsLb; +import org.jooq.Record; + +import java.util.function.Function; + +public final class RecordMappers { + + private RecordMappers() {} + + public static Function getDbRecordToRecordsLbMapper() { + return jooqRecord -> { + org.folio.rest.jooq.tables.pojos.RecordsLb pojo = new org.folio.rest.jooq.tables.pojos.RecordsLb(); + pojo.setId(jooqRecord.get(org.folio.rest.jooq.tables.RecordsLb.RECORDS_LB.ID)); + pojo.setSnapshotId(jooqRecord.get(org.folio.rest.jooq.tables.RecordsLb.RECORDS_LB.SNAPSHOT_ID)); + pojo.setMatchedId(jooqRecord.get(org.folio.rest.jooq.tables.RecordsLb.RECORDS_LB.MATCHED_ID)); + 
pojo.setGeneration(jooqRecord.get(org.folio.rest.jooq.tables.RecordsLb.RECORDS_LB.GENERATION)); + pojo.setRecordType(java.util.Arrays.stream(org.folio.rest.jooq.enums.RecordType.values()) + .filter(jooqRecord.get(org.folio.rest.jooq.tables.RecordsLb.RECORDS_LB.RECORD_TYPE)::equals) + .findFirst() + .orElse(null)); + pojo.setExternalId(jooqRecord.get(org.folio.rest.jooq.tables.RecordsLb.RECORDS_LB.EXTERNAL_ID)); + pojo.setState(java.util.Arrays.stream(org.folio.rest.jooq.enums.RecordState.values()) + .filter(jooqRecord.get(org.folio.rest.jooq.tables.RecordsLb.RECORDS_LB.STATE)::equals) + .findFirst() + .orElse(null)); + pojo.setLeaderRecordStatus(jooqRecord.get(org.folio.rest.jooq.tables.RecordsLb.RECORDS_LB.LEADER_RECORD_STATUS)); + pojo.setOrder(jooqRecord.get(org.folio.rest.jooq.tables.RecordsLb.RECORDS_LB.ORDER)); + pojo.setSuppressDiscovery(jooqRecord.get(org.folio.rest.jooq.tables.RecordsLb.RECORDS_LB.SUPPRESS_DISCOVERY)); + pojo.setCreatedByUserId(jooqRecord.get(org.folio.rest.jooq.tables.RecordsLb.RECORDS_LB.CREATED_BY_USER_ID)); + pojo.setCreatedDate(jooqRecord.get(org.folio.rest.jooq.tables.RecordsLb.RECORDS_LB.CREATED_DATE)); + pojo.setUpdatedByUserId(jooqRecord.get(org.folio.rest.jooq.tables.RecordsLb.RECORDS_LB.UPDATED_BY_USER_ID)); + pojo.setUpdatedDate(jooqRecord.get(org.folio.rest.jooq.tables.RecordsLb.RECORDS_LB.UPDATED_DATE)); + pojo.setExternalHrid(jooqRecord.get(org.folio.rest.jooq.tables.RecordsLb.RECORDS_LB.EXTERNAL_HRID)); + return pojo; + }; + } +} diff --git a/mod-source-record-storage-server/src/main/java/org/folio/services/RecordService.java b/mod-source-record-storage-server/src/main/java/org/folio/services/RecordService.java index 3506cf11c..93da75c36 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/services/RecordService.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/services/RecordService.java @@ -1,10 +1,12 @@ package org.folio.services; +import io.reactivex.Flowable; +import io.vertx.core.Future; +import io.vertx.sqlclient.Row; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Optional; -import io.vertx.sqlclient.Row; import org.folio.dao.util.IdType; import org.folio.dao.util.RecordType; import org.folio.rest.jaxrs.model.FetchParsedRecordsBatchRequest; @@ -21,12 +23,10 @@ import org.folio.rest.jaxrs.model.SourceRecordCollection; import org.folio.rest.jaxrs.model.StrippedParsedRecordCollection; import org.folio.rest.jooq.enums.RecordState; +import org.folio.services.entities.RecordsModifierOperator; import org.jooq.Condition; import org.jooq.OrderField; -import io.reactivex.Flowable; -import io.vertx.core.Future; - public interface RecordService { /** @@ -83,17 +83,17 @@ public interface RecordService { Future saveRecords(RecordCollection recordsCollection, Map okapiHeaders); /** - * Saves collection of records + * Saves collection of records. * - * @param recordsCollection records to save - * @param orderedBlocking boolean indicator to control if blocking logic needs to be run sequentially (true) or - * concurrently (false) by the Verticle. 
+ * @param externalIds external relation ids + * @param recordType record type + * @param recordsModifier records collection modifier operator * @param okapiHeaders okapi headers * @return future with response containing list of successfully saved records and error messages for records that were not saved */ - Future saveRecordsBlocking(RecordCollection recordsCollection, - boolean orderedBlocking, - Map okapiHeaders); + Future saveRecordsByExternalIds(List externalIds, RecordType recordType, + RecordsModifierOperator recordsModifier, + Map okapiHeaders); /** * Updates record with given id diff --git a/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java b/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java index 0d528ac0e..6e2708803 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java @@ -11,6 +11,7 @@ import static org.folio.dao.util.RecordDaoUtil.ensureRecordHasSuppressDiscovery; import static org.folio.dao.util.RecordDaoUtil.filterRecordByExternalHridValuesWithQualifier; import static org.folio.dao.util.RecordDaoUtil.filterRecordByState; +import static org.folio.dao.util.RecordDaoUtil.getExternalIdType; import static org.folio.dao.util.RecordDaoUtil.getExternalIdsConditionWithQualifier; import static org.folio.dao.util.SnapshotDaoUtil.SNAPSHOT_NOT_FOUND_TEMPLATE; import static org.folio.dao.util.SnapshotDaoUtil.SNAPSHOT_NOT_STARTED_MESSAGE_TEMPLATE; @@ -38,10 +39,11 @@ import java.util.Objects; import java.util.Optional; import java.util.UUID; -import java.util.stream.Collectors; +import java.util.concurrent.ExecutionException; import javax.ws.rs.BadRequestException; import javax.ws.rs.NotFoundException; import net.sf.jsqlparser.JSQLParserException; +import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -73,6 +75,7 @@ import org.folio.rest.jaxrs.model.SourceRecordCollection; import org.folio.rest.jaxrs.model.StrippedParsedRecordCollection; import org.folio.rest.jooq.enums.RecordState; +import org.folio.services.entities.RecordsModifierOperator; import org.folio.services.exceptions.DuplicateRecordException; import org.folio.services.exceptions.RecordUpdateException; import org.folio.services.util.AdditionalFieldsUtil; @@ -170,28 +173,52 @@ public Future saveRecord(Record record, Map okapiHeaders @Override public Future saveRecords(RecordCollection recordCollection, Map okapiHeaders) { - return saveRecordsBlocking(recordCollection, false, okapiHeaders); - } - - @Override - public Future saveRecordsBlocking(RecordCollection recordCollection, - boolean orderedBlocking, - Map okapiHeaders) { if (recordCollection.getRecords().isEmpty()) { Promise promise = Promise.promise(); promise.complete(new RecordsBatchResponse().withTotalRecords(0)); return promise.future(); } List setMatchedIdsFutures = new ArrayList<>(); - recordCollection.getRecords().forEach(record -> setMatchedIdsFutures.add(setMatchedIdForRecord(record, - okapiHeaders.get(OKAPI_TENANT_HEADER)))); + recordCollection.getRecords().forEach(sourceRecord -> + setMatchedIdsFutures.add(setMatchedIdForRecord(sourceRecord, okapiHeaders.get(OKAPI_TENANT_HEADER)))); return GenericCompositeFuture.all(setMatchedIdsFutures) .compose(ar -> ar.succeeded() ? 
- recordDao.saveRecordsBlocking(recordCollection, orderedBlocking, okapiHeaders) - : Future.failedFuture(ar.cause())) + recordDao.saveRecords(recordCollection, okapiHeaders) : Future.failedFuture(ar.cause())) .recover(RecordServiceImpl::mapToDuplicateExceptionIfNeeded); } + @Override + public Future saveRecordsByExternalIds(List externalIds, + RecordType recordType, + RecordsModifierOperator recordsModifier, + Map okapiHeaders) { + if (CollectionUtils.isEmpty(externalIds)) { + LOG.warn("saveRecordsBlocking:: Skipping the records save, no external IDs are provided"); + return Future.succeededFuture(new RecordsBatchResponse().withTotalRecords(0)); + } + + if (recordsModifier == null) { + LOG.warn("saveRecordsBlocking:: Skipping the records save, no operator is provided to modify the existing records"); + return Future.succeededFuture(new RecordsBatchResponse().withTotalRecords(0)); + } + + RecordsModifierOperator recordsMatchedIdsSetter = recordCollection -> { + try { + for (var sourceRecord : recordCollection.getRecords()) { + setMatchedIdForRecord(sourceRecord, okapiHeaders.get(OKAPI_TENANT_HEADER)) + .toCompletionStage().toCompletableFuture().get(); + } + return recordCollection; + } catch (InterruptedException | ExecutionException ex) { + LOG.warn("saveRecordsBlocking:: Failed to set record matched id: {}", ex.getMessage()); + throw new RecordUpdateException(ex.getMessage()); + } + }; + RecordsModifierOperator recordsModifierWithMatchedIdsSetter = recordsModifier.andThen(recordsMatchedIdsSetter); + + return recordDao.saveRecordsByExternalIds(externalIds, recordType, recordsModifierWithMatchedIdsSetter, okapiHeaders); + } + @Override public Future updateRecord(Record record, Map okapiHeaders) { return recordDao.updateRecord(ensureRecordForeignKeys(record), okapiHeaders); @@ -387,7 +414,7 @@ private Future setMatchedIdForRecord(Record record, String tenantId) { } Promise promise = Promise.promise(); String externalId = RecordDaoUtil.getExternalId(record.getExternalIdsHolder(), record.getRecordType()); - IdType idType = RecordDaoUtil.getExternalIdType(record.getRecordType()); + IdType idType = getExternalIdType(record.getRecordType()); if (externalId != null && idType != null && record.getState() == Record.State.ACTUAL) { setMatchedIdFromExistingSourceRecord(record, tenantId, promise, externalId, idType); @@ -459,7 +486,7 @@ private void filterFieldsByDataRange(AsyncResult .map(JsonObject.class::cast) .filter(field -> checkFieldRange(field, data)) .map(JsonObject::getMap) - .collect(Collectors.toList()); + .toList(); parsedContent.put("fields", filteredFields); recordToFilter.getParsedRecord().setContent(parsedContent.getMap()); diff --git a/mod-source-record-storage-server/src/main/java/org/folio/services/entities/RecordsModifierOperator.java b/mod-source-record-storage-server/src/main/java/org/folio/services/entities/RecordsModifierOperator.java new file mode 100644 index 000000000..5dcd87483 --- /dev/null +++ b/mod-source-record-storage-server/src/main/java/org/folio/services/entities/RecordsModifierOperator.java @@ -0,0 +1,24 @@ +package org.folio.services.entities; + +import org.folio.rest.jaxrs.model.RecordCollection; +import java.util.Objects; +import java.util.function.UnaryOperator; + +@FunctionalInterface +public +interface RecordsModifierOperator extends UnaryOperator { + + static RecordsModifierOperator identity() { + return s -> s; + } + + default RecordsModifierOperator andThen(RecordsModifierOperator after) { + Objects.requireNonNull(after); + return s -> 
after.apply(this.apply(s)); + } + + default RecordsModifierOperator compose(RecordsModifierOperator before) { + Objects.requireNonNull(before); + return s -> this.apply(before.apply(s)); + } +} diff --git a/mod-source-record-storage-server/src/main/java/org/folio/services/util/EventHandlingUtil.java b/mod-source-record-storage-server/src/main/java/org/folio/services/util/EventHandlingUtil.java index 8b836df5d..9c7c846c8 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/services/util/EventHandlingUtil.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/services/util/EventHandlingUtil.java @@ -28,7 +28,6 @@ import org.folio.processing.events.utils.PomReaderUtil; import org.folio.rest.jaxrs.model.Event; import org.folio.rest.jaxrs.model.EventMetadata; -import org.folio.rest.tools.utils.ModuleName; import org.folio.services.domainevent.SourceRecordDomainEventType; public final class EventHandlingUtil { @@ -102,8 +101,8 @@ public static KafkaProducerRecord createProducerRecord(String ev } public static String constructModuleName() { - return PomReaderUtil.INSTANCE.constructModuleVersionAndVersion(ModuleName.getModuleName(), - ModuleName.getModuleVersion()); + return PomReaderUtil.INSTANCE.constructModuleVersionAndVersion(PomReaderUtil.INSTANCE.getModuleName(), + PomReaderUtil.INSTANCE.getVersion()); } public static String createTopicName(String eventType, String tenantId, KafkaConfig kafkaConfig) { diff --git a/mod-source-record-storage-server/src/main/java/org/folio/verticle/consumers/AuthorityLinkChunkConsumersVerticle.java b/mod-source-record-storage-server/src/main/java/org/folio/verticle/consumers/AuthorityLinkChunkConsumersVerticle.java index 643d50666..8094b3841 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/verticle/consumers/AuthorityLinkChunkConsumersVerticle.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/verticle/consumers/AuthorityLinkChunkConsumersVerticle.java @@ -19,7 +19,7 @@ public class AuthorityLinkChunkConsumersVerticle extends AbstractConsumerVerticl private final AuthorityLinkChunkKafkaHandler kafkaHandler; - @Value("${srs.kafka.AuthorityLinkChunkConsumer.loadLimit:1}") + @Value("${srs.kafka.AuthorityLinkChunkConsumer.loadLimit:2}") private int loadLimit; @Autowired diff --git a/mod-source-record-storage-server/src/test/java/org/folio/services/RecordServiceTest.java b/mod-source-record-storage-server/src/test/java/org/folio/services/RecordServiceTest.java index 7745a4de5..5a19660c9 100644 --- a/mod-source-record-storage-server/src/test/java/org/folio/services/RecordServiceTest.java +++ b/mod-source-record-storage-server/src/test/java/org/folio/services/RecordServiceTest.java @@ -148,7 +148,7 @@ public void shouldGetMarcBibRecordsBySnapshotId(TestContext context) { .filter(r -> r.getRecordType().equals(Record.RecordType.MARC_BIB)) .filter(r -> r.getSnapshotId().equals(snapshotId)) .sorted(comparing(Record::getOrder)) - .collect(Collectors.toList()); + .toList(); context.assertEquals(expected.size(), get.result().getTotalRecords()); compareRecords(context, expected.get(1), get.result().getRecords().get(0)); compareRecords(context, expected.get(2), get.result().getRecords().get(1)); @@ -278,7 +278,7 @@ public void shouldStreamMarcBibRecordsBySnapshotId(TestContext context) { .filter(r -> r.getRecordType().equals(Record.RecordType.MARC_BIB)) .filter(r -> r.getSnapshotId().equals(snapshotId)) .sorted(comparing(Record::getOrder)) - .collect(Collectors.toList()); + .toList(); List actual = new 
ArrayList<>(); flowable.doFinally(() -> { @@ -1495,7 +1495,7 @@ private void getTotalRecordsAndRecordsDependsOnLimit(TestContext context, int li } List expected = records.stream() .filter(r -> r.getRecordType().equals(Record.RecordType.MARC_BIB)) - .collect(Collectors.toList()); + .toList(); context.assertEquals(expected.size(), get.result().getTotalRecords()); context.assertEquals(limit, get.result().getRecords().size()); async.complete(); @@ -1525,7 +1525,7 @@ private void getRecordsBySnapshotId(TestContext context, String snapshotId, Reco .filter(r -> r.getRecordType().equals(recordType)) .filter(r -> r.getSnapshotId().equals(snapshotId)) .sorted(comparing(Record::getOrder)) - .collect(Collectors.toList()); + .toList(); context.assertEquals(expected.size(), get.result().getTotalRecords()); compareRecords(context, expected.get(0), get.result().getRecords().get(0)); async.complete(); @@ -1553,8 +1553,8 @@ private void getMarcRecordsBetweenDates(TestContext context, OffsetDateTime earl } List expected = records.stream() .filter(r -> r.getRecordType().equals(recordType)) - .sorted(comparing(Record::getOrder)) - .collect(Collectors.toList()); + //.sorted(comparing(Record::getOrder)) + .toList(); context.assertEquals(expected.size(), get.result().getTotalRecords()); compareRecords(context, expected, get.result().getRecords()); async.complete(); @@ -1582,7 +1582,7 @@ private void streamRecordsBySnapshotId(TestContext context, String snapshotId, R .filter(r -> r.getRecordType().equals(recordType)) .filter(r -> r.getSnapshotId().equals(snapshotId)) .sorted(comparing(Record::getOrder)) - .collect(Collectors.toList()); + .toList(); List actual = new ArrayList<>(); flowable.doFinally(() -> { @@ -1686,8 +1686,8 @@ private void saveMarcRecords(TestContext context, Record.RecordType marcBib) { } context.assertEquals(0, batch.result().getErrorMessages().size()); context.assertEquals(expected.size(), batch.result().getTotalRecords()); - expected.sort(comparing(Record::getId)); - batch.result().getRecords().sort(comparing(Record::getId)); + //expected.sort(comparing(Record::getId)); + //batch.result().getRecords().sort(comparing(Record::getId)); compareRecords(context, expected, batch.result().getRecords()); RecordDaoUtil.countByCondition(postgresClientFactory.getQueryExecutor(TENANT_ID), DSL.trueCondition()) .onComplete(count -> { @@ -1717,8 +1717,8 @@ private void saveMarcRecordsWithExpectedErrors(TestContext context, Record.Recor } context.assertEquals(0, batch.result().getErrorMessages().size()); context.assertEquals(expected.size(), batch.result().getTotalRecords()); - expected.sort(comparing(Record::getId)); - batch.result().getRecords().sort(comparing(Record::getId)); + //expected.sort(comparing(Record::getId)); + //batch.result().getRecords().sort(comparing(Record::getId)); compareRecords(context, expected, batch.result().getRecords()); checkRecordErrorRecords(context, batch.result().getRecords(), TestMocks.getErrorRecord(0).getContent().toString(), TestMocks.getErrorRecord(0).getDescription()); @@ -1761,9 +1761,9 @@ private void getMarcSourceRecords(TestContext context, RecordType parsedRecordTy List expected = records.stream() .filter(r -> r.getRecordType().equals(recordType)) .map(RecordDaoUtil::toSourceRecord) - .sorted(comparing(SourceRecord::getRecordId)) - .collect(Collectors.toList()); - get.result().getSourceRecords().sort(comparing(SourceRecord::getRecordId)); + //.sorted(comparing(SourceRecord::getRecordId)) + .toList(); + 
//get.result().getSourceRecords().sort(comparing(SourceRecord::getRecordId)); context.assertEquals(expected.size(), get.result().getTotalRecords()); compareSourceRecords(context, expected, get.result().getSourceRecords()); async.complete(); @@ -1790,14 +1790,14 @@ private void streamMarcSourceRecords(TestContext context, RecordType parsedRecor List expected = records.stream() .filter(r -> r.getRecordType().equals(recordType)) .map(RecordDaoUtil::toSourceRecord) - .sorted(comparing(SourceRecord::getRecordId)) - .sorted(comparing(SourceRecord::getOrder)) - .collect(Collectors.toList()); + //.sorted(comparing(SourceRecord::getRecordId)) + //.sorted(comparing(SourceRecord::getOrder)) + .toList(); List actual = new ArrayList<>(); flowable.doFinally(() -> { - actual.sort(comparing(SourceRecord::getRecordId)); + //actual.sort(comparing(SourceRecord::getRecordId)); context.assertEquals(expected.size(), actual.size()); compareSourceRecords(context, expected, actual); @@ -1831,9 +1831,9 @@ private void getMarcSourceRecordsByListOfIds(TestContext context, Record.RecordT List expected = records.stream() .filter(r -> r.getRecordType().equals(recordType)) .map(RecordDaoUtil::toSourceRecord) - .sorted(comparing(SourceRecord::getRecordId)) - .collect(Collectors.toList()); - sortByRecordId(get); + //.sorted(comparing(SourceRecord::getRecordId)) + .toList(); + //sortByRecordId(get); context.assertEquals(expected.size(), get.result().getTotalRecords()); compareSourceRecords(context, expected, get.result().getSourceRecords()); async.complete(); @@ -1867,9 +1867,9 @@ private void getMarcSourceRecordsBetweenDates(TestContext context, Record.Record List expected = records.stream() .filter(r -> r.getRecordType().equals(recordType)) .map(RecordDaoUtil::toSourceRecord) - .sorted(comparing(SourceRecord::getRecordId)) - .collect(Collectors.toList()); - get.result().getSourceRecords().sort(comparing(SourceRecord::getRecordId)); + //.sorted(comparing(SourceRecord::getRecordId)) + .toList(); + //get.result().getSourceRecords().sort(comparing(SourceRecord::getRecordId)); context.assertEquals(expected.size(), get.result().getTotalRecords()); compareSourceRecords(context, expected, get.result().getSourceRecords()); async.complete(); @@ -1922,9 +1922,9 @@ private void getMarcSourceRecordsByListOfIdsThatAreDeleted(TestContext context, List expected = records.stream() .filter(r -> r.getRecordType().equals(recordType)) .map(RecordDaoUtil::toSourceRecord) - .sorted(comparing(SourceRecord::getRecordId)) + //.sorted(comparing(SourceRecord::getRecordId)) .collect(Collectors.toList()); - get.result().getSourceRecords().sort(comparing(SourceRecord::getRecordId)); + //get.result().getSourceRecords().sort(comparing(SourceRecord::getRecordId)); context.assertEquals(expected.size(), get.result().getTotalRecords()); compareSourceRecords(context, expected, get.result().getSourceRecords()); async.complete(); @@ -1996,8 +1996,8 @@ private void updateParsedMarcRecords(TestContext context, Record.RecordType reco } context.assertEquals(0, update.result().getErrorMessages().size()); context.assertEquals(expected.size(), update.result().getTotalRecords()); - expected.sort(comparing(ParsedRecord::getId)); - update.result().getParsedRecords().sort(comparing(ParsedRecord::getId)); + //expected.sort(comparing(ParsedRecord::getId)); + //update.result().getParsedRecords().sort(comparing(ParsedRecord::getId)); compareParsedRecords(context, expected, update.result().getParsedRecords()); GenericCompositeFuture.all(updated.stream().map(record -> recordDao 
.getRecordByMatchedId(record.getMatchedId(), TENANT_ID) @@ -2146,7 +2146,12 @@ private CompositeFuture saveRecords(List records) { private void compareRecords(TestContext context, List expected, List actual) { context.assertEquals(expected.size(), actual.size()); for (Record record : expected) { - compareRecords(context, record, record); + var actualRecord = actual.stream() + .filter(r -> Objects.equals(r.getId(), record.getId())) + .findFirst(); + if (actualRecord.isPresent()) { + compareRecords(context, record, actualRecord.get()); + } } } @@ -2211,7 +2216,12 @@ private void compareRecords(TestContext context, Record expected, StrippedParsed private void compareSourceRecords(TestContext context, List expected, List actual) { context.assertEquals(expected.size(), actual.size()); for (SourceRecord sourceRecord : expected) { - compareSourceRecords(context, sourceRecord, sourceRecord); + var sourceRecordActual = actual.stream() + .filter(sr -> Objects.equals(sr.getRecordId(), sourceRecord.getRecordId())) + .findFirst(); + if (sourceRecordActual.isPresent()) { + compareSourceRecords(context, sourceRecord, sourceRecordActual.get()); + } } } @@ -2244,7 +2254,10 @@ private void compareSourceRecords(TestContext context, SourceRecord expected, So private void compareParsedRecords(TestContext context, List expected, List actual) { context.assertEquals(expected.size(), actual.size()); for (ParsedRecord parsedRecord : expected) { - compareParsedRecords(context, parsedRecord, parsedRecord); + var actualParsedRecord = actual.stream().filter(a -> Objects.equals(a.getId(), parsedRecord.getId())).findFirst(); + if (actualParsedRecord.isPresent()) { + compareParsedRecords(context, parsedRecord, actualParsedRecord.get()); + } } } diff --git a/pom.xml b/pom.xml index 20f2e851b..7b36b98f7 100644 --- a/pom.xml +++ b/pom.xml @@ -3,7 +3,7 @@ 4.0.0 org.folio mod-source-record-storage - 5.10.0-SNAPSHOT + 5.9.0-SNAPSHOT pom From 06d3e5c55523a3293bc8112997fc99e1b6a83666 Mon Sep 17 00:00:00 2001 From: Mukhiddin Yusupov Date: Thu, 14 Nov 2024 16:54:59 +0500 Subject: [PATCH 11/19] modsource-817: refactor --- mod-source-record-storage-client/pom.xml | 2 +- mod-source-record-storage-server/pom.xml | 2 +- .../consumers/AuthorityLinkChunkKafkaHandler.java | 3 ++- .../src/main/java/org/folio/dao/RecordDaoImpl.java | 13 ------------- 4 files changed, 4 insertions(+), 16 deletions(-) diff --git a/mod-source-record-storage-client/pom.xml b/mod-source-record-storage-client/pom.xml index a3df89d30..6be29123e 100644 --- a/mod-source-record-storage-client/pom.xml +++ b/mod-source-record-storage-client/pom.xml @@ -6,7 +6,7 @@ org.folio mod-source-record-storage - 5.9.0-SNAPSHOT + 5.10.0-SNAPSHOT diff --git a/mod-source-record-storage-server/pom.xml b/mod-source-record-storage-server/pom.xml index f94f01671..34f022480 100644 --- a/mod-source-record-storage-server/pom.xml +++ b/mod-source-record-storage-server/pom.xml @@ -5,7 +5,7 @@ org.folio mod-source-record-storage - 5.9.0-SNAPSHOT + 5.10.0-SNAPSHOT diff --git a/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java b/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java index aa91995dc..60f786c06 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/consumers/AuthorityLinkChunkKafkaHandler.java @@ -85,8 +85,9 @@ public 
AuthorityLinkChunkKafkaHandler(RecordService recordService, KafkaConfig k @Override public Future handle(KafkaConsumerRecord consumerRecord) { - var userId = extractHeaderValue(XOkapiHeaders.USER_ID, consumerRecord.headers()); + LOGGER.trace("handle:: Handling kafka record: {}", consumerRecord); LOGGER.info("handle:: Start Handling kafka record"); + var userId = extractHeaderValue(XOkapiHeaders.USER_ID, consumerRecord.headers()); var result = mapToEvent(consumerRecord) .compose(this::createSnapshot) diff --git a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java index aba859d5c..b890c73d6 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java @@ -614,19 +614,6 @@ private void appendWhere(SelectJoinStep step, ParseLeaderResult parseLeaderResul .and(RECORDS_LB.EXTERNAL_ID.isNotNull()); } - private void appendJoinAlternative(SelectJoinStep selectJoinStep, ParseLeaderResult parseLeaderResult, ParseFieldsResult parseFieldsResult) { - if (parseLeaderResult.isEnabled()) { - Table marcIndexersLeader = table(name("marc_indexers_leader")); - selectJoinStep.innerJoin(marcIndexersLeader).on(RECORDS_LB.ID.eq(field(TABLE_FIELD_TEMPLATE, UUID.class, marcIndexersLeader, name(MARC_ID)))); - } - if (parseFieldsResult.isEnabled()) { - parseFieldsResult.getFieldsToJoin().forEach(fieldToJoin -> { - Table marcIndexers = table(name(MARC_INDEXERS_PARTITION_PREFIX + fieldToJoin)).as("i" + fieldToJoin); - selectJoinStep.innerJoin(marcIndexers).on(RECORDS_LB.ID.eq(field(TABLE_FIELD_TEMPLATE, UUID.class, marcIndexers, name(MARC_ID)))); - }); - } - } - @Override public Future getMatchedRecordsIdentifiers(MatchField matchedField, Filter.ComparisonPartType comparisonPartType, boolean returnTotalRecords, TypeConnection typeConnection, From 890e9a65d9dbdc02e6ab375d3568b4c211657c13 Mon Sep 17 00:00:00 2001 From: Mukhiddin Yusupov Date: Thu, 14 Nov 2024 17:20:00 +0500 Subject: [PATCH 12/19] MODSOURCE-817: update pom and refactor --- .../src/main/java/org/folio/dao/RecordDaoImpl.java | 6 +++--- pom.xml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java index b890c73d6..6e242efec 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java @@ -725,10 +725,10 @@ public Future saveRecord(Record record, Map okapiHeaders } @Override - public Future saveRecord(ReactiveClassicGenericQueryExecutor txQE, Record sourceRecord, + public Future saveRecord(ReactiveClassicGenericQueryExecutor txQE, Record recordDto, Map okapiHeaders) { - LOG.trace("saveRecord:: Saving {} record {}", sourceRecord.getRecordType(), sourceRecord.getId()); - return insertOrUpdateRecord(txQE, sourceRecord) + LOG.trace("saveRecord:: Saving {} record {}", recordDto.getRecordType(), recordDto.getId()); + return insertOrUpdateRecord(txQE, recordDto) .onSuccess(created -> recordDomainEventPublisher.publishRecordCreated(created, okapiHeaders)); } diff --git a/pom.xml b/pom.xml index 7b36b98f7..20f2e851b 100644 --- a/pom.xml +++ b/pom.xml @@ -3,7 +3,7 @@ 4.0.0 org.folio mod-source-record-storage - 5.9.0-SNAPSHOT + 5.10.0-SNAPSHOT pom From 
207ce8f0d49b7747d036769ef2c01ed4b30cf4e0 Mon Sep 17 00:00:00 2001 From: Mukhiddin Yusupov Date: Fri, 22 Nov 2024 16:28:30 +0500 Subject: [PATCH 13/19] MODSOURCE-817: refactor --- .../main/java/org/folio/services/RecordServiceImpl.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java b/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java index 6e2708803..46b29ca0b 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java @@ -179,11 +179,12 @@ public Future saveRecords(RecordCollection recordCollectio return promise.future(); } List setMatchedIdsFutures = new ArrayList<>(); - recordCollection.getRecords().forEach(sourceRecord -> - setMatchedIdsFutures.add(setMatchedIdForRecord(sourceRecord, okapiHeaders.get(OKAPI_TENANT_HEADER)))); + recordCollection.getRecords().forEach(record -> setMatchedIdsFutures.add(setMatchedIdForRecord(record, + okapiHeaders.get(OKAPI_TENANT_HEADER)))); return GenericCompositeFuture.all(setMatchedIdsFutures) .compose(ar -> ar.succeeded() ? - recordDao.saveRecords(recordCollection, okapiHeaders) : Future.failedFuture(ar.cause())) + recordDao.saveRecords(recordCollection, okapiHeaders) + : Future.failedFuture(ar.cause())) .recover(RecordServiceImpl::mapToDuplicateExceptionIfNeeded); } From b645f282d42c0f00adb54249521229f78514670f Mon Sep 17 00:00:00 2001 From: Mukhiddin Yusupov Date: Fri, 22 Nov 2024 18:52:39 +0500 Subject: [PATCH 14/19] MODSOURCE-817: fix test --- .../ParsedRecordChunkConsumersVerticleTest.java | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/mod-source-record-storage-server/src/test/java/org/folio/verticle/consumers/ParsedRecordChunkConsumersVerticleTest.java b/mod-source-record-storage-server/src/test/java/org/folio/verticle/consumers/ParsedRecordChunkConsumersVerticleTest.java index 010cda28c..11f9ee7cf 100644 --- a/mod-source-record-storage-server/src/test/java/org/folio/verticle/consumers/ParsedRecordChunkConsumersVerticleTest.java +++ b/mod-source-record-storage-server/src/test/java/org/folio/verticle/consumers/ParsedRecordChunkConsumersVerticleTest.java @@ -36,6 +36,7 @@ import java.io.IOException; import java.nio.charset.Charset; +import java.util.Arrays; import java.util.UUID; import java.util.Date; import java.util.List; @@ -174,7 +175,9 @@ public void shouldSendDIErrorEventsWhenParsedRecordChunkWasNotSaved() throws Int sendRecordsToKafka(jobExecutionId, records); - check_DI_ERROR_eventsSent(jobExecutionId, records, "ERROR: insert or update on table \"raw_records_lb\" violates foreign key constraint \"fk_raw_records_records\"" ); + check_DI_ERROR_eventsSent(jobExecutionId, records, + "ERROR: insert or update on table \"raw_records_lb\" violates foreign key constraint \"fk_raw_records_records\"", + "ERROR: insert or update on table \"marc_records_lb\" violates foreign key constraint \"fk_marc_records_records\""); } @Test @@ -300,9 +303,7 @@ private void check_DI_ERROR_eventsSent(String jobExecutionId, List recor assertEquals(DI_ERROR.value(), eventPayload.getEventType()); assertEquals(TENANT_ID, eventPayload.getTenant()); assertTrue(StringUtils.isNotBlank(recordId)); - for (String errorMessage: errorMessages) { - assertTrue(error.contains(errorMessage)); - } + assertTrue(Arrays.asList(errorMessages).contains(error)); 
assertFalse(eventPayload.getEventsChain().isEmpty()); assertEquals(DI_LOG_SRS_MARC_BIB_RECORD_CREATED.value(), eventPayload.getEventsChain().get(0)); } From 83c6176ed2d6b793a1589869ec0cf9d33733dc14 Mon Sep 17 00:00:00 2001 From: Mukhiddin Yusupov Date: Fri, 22 Nov 2024 20:24:15 +0500 Subject: [PATCH 15/19] MODSOURCE-817: fix test --- .../ParsedRecordChunkConsumersVerticleTest.java | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/mod-source-record-storage-server/src/test/java/org/folio/verticle/consumers/ParsedRecordChunkConsumersVerticleTest.java b/mod-source-record-storage-server/src/test/java/org/folio/verticle/consumers/ParsedRecordChunkConsumersVerticleTest.java index 11f9ee7cf..3fa774b8c 100644 --- a/mod-source-record-storage-server/src/test/java/org/folio/verticle/consumers/ParsedRecordChunkConsumersVerticleTest.java +++ b/mod-source-record-storage-server/src/test/java/org/folio/verticle/consumers/ParsedRecordChunkConsumersVerticleTest.java @@ -5,6 +5,7 @@ import io.vertx.core.json.JsonObject; import io.vertx.ext.unit.Async; import io.vertx.ext.unit.TestContext; +import io.vertx.ext.unit.junit.RunTestOnContext; import io.vertx.ext.unit.junit.VertxUnitRunner; import net.mguenther.kafka.junit.KeyValue; import net.mguenther.kafka.junit.ObserveKeyValues; @@ -30,13 +31,13 @@ import org.junit.After; import org.junit.Before; import org.junit.BeforeClass; +import org.junit.Rule; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.MockitoAnnotations; import java.io.IOException; import java.nio.charset.Charset; -import java.util.Arrays; import java.util.UUID; import java.util.Date; import java.util.List; @@ -63,6 +64,9 @@ public class ParsedRecordChunkConsumersVerticleTest extends AbstractLBServiceTes private static String recordId = UUID.randomUUID().toString(); + @Rule + public RunTestOnContext rule = new RunTestOnContext(); + private static RawRecord rawMarcRecord; private static ParsedRecord parsedMarcRecord; @@ -167,7 +171,7 @@ private void sendEventWithSavedMarcRecordCollectionPayloadAfterProcessingParsedR } @Test - public void shouldSendDIErrorEventsWhenParsedRecordChunkWasNotSaved() throws InterruptedException { + public void shouldSendDIErrorEventsWhenParsedRecordChunkWasNotSaved(TestContext context) throws InterruptedException { Record validRecord = TestMocks.getRecord(0).withSnapshotId(snapshotId); Record additionalRecord = getAdditionalRecord(validRecord, snapshotId, validRecord.getRecordType()); List records = List.of(validRecord, additionalRecord); @@ -176,8 +180,7 @@ public void shouldSendDIErrorEventsWhenParsedRecordChunkWasNotSaved() throws Int sendRecordsToKafka(jobExecutionId, records); check_DI_ERROR_eventsSent(jobExecutionId, records, - "ERROR: insert or update on table \"raw_records_lb\" violates foreign key constraint \"fk_raw_records_records\"", - "ERROR: insert or update on table \"marc_records_lb\" violates foreign key constraint \"fk_marc_records_records\""); + "ERROR: insert or update on table \"raw_records_lb\" violates foreign key constraint \"fk_raw_records_records\""); } @Test @@ -303,7 +306,9 @@ private void check_DI_ERROR_eventsSent(String jobExecutionId, List recor assertEquals(DI_ERROR.value(), eventPayload.getEventType()); assertEquals(TENANT_ID, eventPayload.getTenant()); assertTrue(StringUtils.isNotBlank(recordId)); - assertTrue(Arrays.asList(errorMessages).contains(error)); + for (String errorMessage: errorMessages) { + assertTrue(error.contains(errorMessage)); + } 
assertFalse(eventPayload.getEventsChain().isEmpty()); assertEquals(DI_LOG_SRS_MARC_BIB_RECORD_CREATED.value(), eventPayload.getEventsChain().get(0)); } From 50ea5c6cfcdcda17b814ce71647041a347bc8b31 Mon Sep 17 00:00:00 2001 From: Mukhiddin Yusupov Date: Mon, 25 Nov 2024 15:58:48 +0500 Subject: [PATCH 16/19] MODSOURCE-817: fix test --- .../src/main/java/org/folio/dao/RecordDaoImpl.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java index 42f5a900a..a044cd39e 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java @@ -887,10 +887,13 @@ private RecordsBatchResponse saveRecords(RecordCollection recordCollection, Stri dbParsedRecords.add(dbParsedRecord); } catch (Exception e) { // create error record and remove from record + Object content = Optional.ofNullable(record.getParsedRecord()) + .map(ParsedRecord::getContent) + .orElse(null); var errorRecord = new ErrorRecord() .withId(record.getId()) .withDescription(e.getMessage()) - .withContent(record.getParsedRecord().getContent()); + .withContent(content); errorMessages.add(format(INVALID_PARSED_RECORD_MESSAGE_TEMPLATE, record.getId(), e.getMessage())); record.withErrorRecord(errorRecord) .withParsedRecord(null) From 4edac7f296962bfd2ad2768dac7a672a5c13155b Mon Sep 17 00:00:00 2001 From: Mukhiddin Yusupov Date: Mon, 25 Nov 2024 19:17:55 +0500 Subject: [PATCH 17/19] MODSOURCE-817: fix test --- .../java/org/folio/dao/RecordDaoImpl.java | 8 +++--- .../exceptions/RecordUpdateException.java | 4 +++ .../org/folio/services/RecordServiceTest.java | 25 +------------------ 3 files changed, 10 insertions(+), 27 deletions(-) diff --git a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java index a044cd39e..c5e172277 100644 --- a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java +++ b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java @@ -119,6 +119,7 @@ import org.folio.services.RecordSearchParameters; import org.folio.services.domainevent.RecordDomainEventPublisher; import org.folio.services.entities.RecordsModifierOperator; +import org.folio.services.exceptions.RecordUpdateException; import org.folio.services.util.TypeConnection; import org.folio.services.util.parser.ParseFieldsResult; import org.folio.services.util.parser.ParseLeaderResult; @@ -1017,9 +1018,10 @@ private RecordsBatchResponse saveRecords(RecordCollection recordCollection, Stri } catch (DuplicateEventException e) { LOG.info("saveRecords:: Skipped saving records due to duplicate event: {}", e.getMessage()); throw e; - } catch (SQLException | DataAccessException e) { - LOG.warn("saveRecords:: Failed to save records", e); - throw e; + } catch (SQLException | DataAccessException ex) { + LOG.warn("saveRecords:: Failed to save records", ex); + Throwable throwable = ex.getCause() != null ? 
ex.getCause() : ex;
+      throw new RecordUpdateException(ex.getMessage(), throwable);
     }
   }
diff --git a/mod-source-record-storage-server/src/main/java/org/folio/services/exceptions/RecordUpdateException.java b/mod-source-record-storage-server/src/main/java/org/folio/services/exceptions/RecordUpdateException.java
index 69232837c..574b9cf34 100644
--- a/mod-source-record-storage-server/src/main/java/org/folio/services/exceptions/RecordUpdateException.java
+++ b/mod-source-record-storage-server/src/main/java/org/folio/services/exceptions/RecordUpdateException.java
@@ -8,4 +8,8 @@ public class RecordUpdateException extends RuntimeException {
   public RecordUpdateException(String message) {
     super(message);
   }
+
+  public RecordUpdateException(String message, Throwable cause) {
+    super(message, cause);
+  }
 }
diff --git a/mod-source-record-storage-server/src/test/java/org/folio/services/RecordServiceTest.java b/mod-source-record-storage-server/src/test/java/org/folio/services/RecordServiceTest.java
index 5a19660c9..e3178205a 100644
--- a/mod-source-record-storage-server/src/test/java/org/folio/services/RecordServiceTest.java
+++ b/mod-source-record-storage-server/src/test/java/org/folio/services/RecordServiceTest.java
@@ -1553,7 +1553,6 @@ private void getMarcRecordsBetweenDates(TestContext context, OffsetDateTime earl
       }
       List expected = records.stream()
         .filter(r -> r.getRecordType().equals(recordType))
-        //.sorted(comparing(Record::getOrder))
         .toList();
       context.assertEquals(expected.size(), get.result().getTotalRecords());
       compareRecords(context, expected, get.result().getRecords());
@@ -1686,8 +1685,6 @@ private void saveMarcRecords(TestContext context, Record.RecordType marcBib) {
       }
       context.assertEquals(0, batch.result().getErrorMessages().size());
       context.assertEquals(expected.size(), batch.result().getTotalRecords());
-      //expected.sort(comparing(Record::getId));
-      //batch.result().getRecords().sort(comparing(Record::getId));
       compareRecords(context, expected, batch.result().getRecords());
       RecordDaoUtil.countByCondition(postgresClientFactory.getQueryExecutor(TENANT_ID), DSL.trueCondition())
         .onComplete(count -> {
@@ -1717,8 +1714,6 @@ private void saveMarcRecordsWithExpectedErrors(TestContext context, Record.Recor
       }
       context.assertEquals(0, batch.result().getErrorMessages().size());
       context.assertEquals(expected.size(), batch.result().getTotalRecords());
-      //expected.sort(comparing(Record::getId));
-      //batch.result().getRecords().sort(comparing(Record::getId));
       compareRecords(context, expected, batch.result().getRecords());
       checkRecordErrorRecords(context, batch.result().getRecords(),
         TestMocks.getErrorRecord(0).getContent().toString(), TestMocks.getErrorRecord(0).getDescription());
@@ -1761,9 +1756,7 @@ private void getMarcSourceRecords(TestContext context, RecordType parsedRecordTy
       List expected = records.stream()
         .filter(r -> r.getRecordType().equals(recordType))
         .map(RecordDaoUtil::toSourceRecord)
-        //.sorted(comparing(SourceRecord::getRecordId))
         .toList();
-      //get.result().getSourceRecords().sort(comparing(SourceRecord::getRecordId));
       context.assertEquals(expected.size(), get.result().getTotalRecords());
       compareSourceRecords(context, expected, get.result().getSourceRecords());
       async.complete();
@@ -1790,14 +1783,10 @@ private void streamMarcSourceRecords(TestContext context, RecordType parsedRecor
     List expected = records.stream()
       .filter(r -> r.getRecordType().equals(recordType))
       .map(RecordDaoUtil::toSourceRecord)
-      //.sorted(comparing(SourceRecord::getRecordId))
-      //.sorted(comparing(SourceRecord::getOrder))
       .toList();

     List actual = new ArrayList<>();
     flowable.doFinally(() -> {
-
-      //actual.sort(comparing(SourceRecord::getRecordId));
       context.assertEquals(expected.size(), actual.size());
       compareSourceRecords(context, expected, actual);

@@ -1831,9 +1820,7 @@ private void getMarcSourceRecordsByListOfIds(TestContext context, Record.RecordT
       List expected = records.stream()
         .filter(r -> r.getRecordType().equals(recordType))
         .map(RecordDaoUtil::toSourceRecord)
-        //.sorted(comparing(SourceRecord::getRecordId))
         .toList();
-      //sortByRecordId(get);
       context.assertEquals(expected.size(), get.result().getTotalRecords());
       compareSourceRecords(context, expected, get.result().getSourceRecords());
       async.complete();
     });
   }
-  private void sortByRecordId(AsyncResult get) {
-    get.result().getSourceRecords().sort(comparing(SourceRecord::getRecordId));
-  }
-
   private void getMarcSourceRecordsBetweenDates(TestContext context, Record.RecordType recordType, RecordType parsedRecordType, OffsetDateTime earliestDate, OffsetDateTime latestDate) {
@@ -1867,9 +1850,7 @@ private void getMarcSourceRecordsBetweenDates(TestContext context, Record.Record
       List expected = records.stream()
         .filter(r -> r.getRecordType().equals(recordType))
         .map(RecordDaoUtil::toSourceRecord)
-        //.sorted(comparing(SourceRecord::getRecordId))
         .toList();
-      //get.result().getSourceRecords().sort(comparing(SourceRecord::getRecordId));
       context.assertEquals(expected.size(), get.result().getTotalRecords());
       compareSourceRecords(context, expected, get.result().getSourceRecords());
       async.complete();
@@ -1922,9 +1903,7 @@ private void getMarcSourceRecordsByListOfIdsThatAreDeleted(TestContext context,
       List expected = records.stream()
         .filter(r -> r.getRecordType().equals(recordType))
         .map(RecordDaoUtil::toSourceRecord)
-        //.sorted(comparing(SourceRecord::getRecordId))
-        .collect(Collectors.toList());
-      //get.result().getSourceRecords().sort(comparing(SourceRecord::getRecordId));
+        .toList();
       context.assertEquals(expected.size(), get.result().getTotalRecords());
       compareSourceRecords(context, expected, get.result().getSourceRecords());
       async.complete();
@@ -1996,8 +1975,6 @@ private void updateParsedMarcRecords(TestContext context, Record.RecordType reco
       }
       context.assertEquals(0, update.result().getErrorMessages().size());
       context.assertEquals(expected.size(), update.result().getTotalRecords());
-      //expected.sort(comparing(ParsedRecord::getId));
-      //update.result().getParsedRecords().sort(comparing(ParsedRecord::getId));
       compareParsedRecords(context, expected, update.result().getParsedRecords());
       GenericCompositeFuture.all(updated.stream().map(record -> recordDao
         .getRecordByMatchedId(record.getMatchedId(), TENANT_ID)

From 969bd509e1f4f571c102b752ea6fe766166e2543 Mon Sep 17 00:00:00 2001
From: Mukhiddin Yusupov
Date: Mon, 25 Nov 2024 19:52:59 +0500
Subject: [PATCH 18/19] MODSOURCE-817: fix test

---
 .../src/main/java/org/folio/dao/RecordDaoImpl.java           | 2 +-
 .../org/folio/services/exceptions/RecordUpdateException.java | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java
index c5e172277..4f82da0e2 100644
--- a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java
+++ b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java
@@ -1021,7 +1021,7 @@ private RecordsBatchResponse saveRecords(RecordCollection recordCollection, Stri
     } catch (SQLException | DataAccessException ex) {
       LOG.warn("saveRecords:: Failed to save records", ex);
       Throwable throwable = ex.getCause() != null ? ex.getCause() : ex;
-      throw new RecordUpdateException(ex.getMessage(), throwable);
+      throw new RecordUpdateException(throwable);
     }
   }

diff --git a/mod-source-record-storage-server/src/main/java/org/folio/services/exceptions/RecordUpdateException.java b/mod-source-record-storage-server/src/main/java/org/folio/services/exceptions/RecordUpdateException.java
index 574b9cf34..8ae129f63 100644
--- a/mod-source-record-storage-server/src/main/java/org/folio/services/exceptions/RecordUpdateException.java
+++ b/mod-source-record-storage-server/src/main/java/org/folio/services/exceptions/RecordUpdateException.java
@@ -9,7 +9,7 @@ public RecordUpdateException(String message) {
     super(message);
   }

-  public RecordUpdateException(String message, Throwable cause) {
-    super(message, cause);
+  public RecordUpdateException(Throwable cause) {
+    super(cause);
   }
 }

From 80c68478e580cd34c40c677b0ececc148f8fa269 Mon Sep 17 00:00:00 2001
From: Mukhiddin Yusupov
Date: Tue, 26 Nov 2024 17:54:01 +0500
Subject: [PATCH 19/19] MODSOURCE-817: fix sonar quality gate issues

---
 .../java/org/folio/dao/RecordDaoImpl.java     | 22 +++++++++----------
 .../org/folio/services/RecordServiceImpl.java | 12 ++++++----
 .../org/folio/services/RecordServiceTest.java |  2 --
 3 files changed, 19 insertions(+), 17 deletions(-)

diff --git a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java
index 4f82da0e2..d787feeae 100644
--- a/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java
+++ b/mod-source-record-storage-server/src/main/java/org/folio/dao/RecordDaoImpl.java
@@ -956,14 +956,14 @@ private RecordsBatchResponse saveRecords(RecordCollection recordCollection, Stri
       .commitAfter(1000)
       .onErrorAbort()
       .loadRecords(dbRecords.stream()
-        .map(record -> {
-          Integer generation = matchedGenerations.get(record.getMatchedId());
+        .map(recordDto -> {
+          Integer generation = matchedGenerations.get(recordDto.getMatchedId());
           if (Objects.nonNull(generation)) {
-            record.setGeneration(generation + 1);
-          } else if (Objects.isNull(record.getGeneration())) {
-            record.setGeneration(0);
+            recordDto.setGeneration(generation + 1);
+          } else if (Objects.isNull(recordDto.getGeneration())) {
+            recordDto.setGeneration(0);
           }
-          return record;
+          return recordDto;
         })
         .toList())
       .fieldsCorresponding()
@@ -1025,14 +1025,14 @@ private RecordsBatchResponse saveRecords(RecordCollection recordCollection, Stri
     }
   }

-  private void validateRecordType(Record record, RecordType recordType) {
-    if (record.getRecordType() == null) {
-      var error = record.getErrorRecord() != null ? record.getErrorRecord().getDescription() : "";
+  private void validateRecordType(Record recordDto, RecordType recordType) {
+    if (recordDto.getRecordType() == null) {
+      var error = recordDto.getErrorRecord() != null ? recordDto.getErrorRecord().getDescription() : "";
       throw new BadRequestException(
-        StringUtils.defaultIfEmpty(error, String.format("Record with id %s has not record type", record.getId())));
+        StringUtils.defaultIfEmpty(error, String.format("Record with id %s has not record type", recordDto.getId())));
     }
-    if (RecordType.valueOf(record.getRecordType().name()) != recordType) {
+    if (RecordType.valueOf(recordDto.getRecordType().name()) != recordType) {
       throw new BadRequestException("Batch record collection only supports single record type");
     }
   }

diff --git a/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java b/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java
index 46b29ca0b..455ec707b 100644
--- a/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java
+++ b/mod-source-record-storage-server/src/main/java/org/folio/services/RecordServiceImpl.java
@@ -194,12 +194,12 @@ public Future saveRecordsByExternalIds(List extern
                                                          RecordsModifierOperator recordsModifier,
                                                          Map okapiHeaders) {
     if (CollectionUtils.isEmpty(externalIds)) {
-      LOG.warn("saveRecordsBlocking:: Skipping the records save, no external IDs are provided");
+      LOG.warn("saveRecordsByExternalIds:: Skipping the records save, no external IDs are provided");
       return Future.succeededFuture(new RecordsBatchResponse().withTotalRecords(0));
     }

     if (recordsModifier == null) {
-      LOG.warn("saveRecordsBlocking:: Skipping the records save, no operator is provided to modify the existing records");
+      LOG.warn("saveRecordsByExternalIds:: Skipping the records save, no operator is provided to modify the existing records");
       return Future.succeededFuture(new RecordsBatchResponse().withTotalRecords(0));
     }
@@ -210,8 +210,12 @@ public Future saveRecordsByExternalIds(List extern
             .toCompletionStage().toCompletableFuture().get();
         }
         return recordCollection;
-      } catch (InterruptedException | ExecutionException ex) {
-        LOG.warn("saveRecordsBlocking:: Failed to set record matched id: {}", ex.getMessage());
+      } catch (InterruptedException ex) {
+        LOG.warn("saveRecordsByExternalIds:: Setting record matched id is interrupted: {}", ex.getMessage());
+        Thread.currentThread().interrupt();
+        throw new RecordUpdateException(ex.getMessage());
+      } catch (ExecutionException ex) {
+        LOG.warn("saveRecordsByExternalIds:: Failed to set record matched id: {}", ex.getMessage());
         throw new RecordUpdateException(ex.getMessage());
       }
     };

diff --git a/mod-source-record-storage-server/src/test/java/org/folio/services/RecordServiceTest.java b/mod-source-record-storage-server/src/test/java/org/folio/services/RecordServiceTest.java
index e3178205a..c71751844 100644
--- a/mod-source-record-storage-server/src/test/java/org/folio/services/RecordServiceTest.java
+++ b/mod-source-record-storage-server/src/test/java/org/folio/services/RecordServiceTest.java
@@ -12,7 +12,6 @@
 import com.fasterxml.jackson.databind.ObjectMapper;

 import io.reactivex.Flowable;
-import io.vertx.core.AsyncResult;
 import io.vertx.core.CompositeFuture;
 import io.vertx.core.Future;
 import io.vertx.core.json.JsonArray;
@@ -62,7 +61,6 @@
 import org.folio.rest.jaxrs.model.RecordsBatchResponse;
 import org.folio.rest.jaxrs.model.Snapshot;
 import org.folio.rest.jaxrs.model.SourceRecord;
-import org.folio.rest.jaxrs.model.SourceRecordCollection;
 import org.folio.rest.jaxrs.model.StrippedParsedRecord;
 import org.folio.rest.jooq.enums.RecordState;
 import org.folio.services.domainevent.RecordDomainEventPublisher;
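
Note on the InterruptedException split in PATCH 19: Sonar flags catch blocks that
swallow InterruptedException, because code further up the call stack can no longer
tell that the thread was asked to stop; the usual fix is to restore the interrupt
flag before translating the failure into a domain exception, which is what the
RecordServiceImpl change above does. A minimal, self-contained sketch of that
pattern follows; the class, method, and exception names here are illustrative
placeholders, not the module's own types.

    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.Future;

    public final class BlockingCallExample {

      private BlockingCallExample() {
      }

      /** Waits for an async result and converts failures into an unchecked error. */
      public static <T> T awaitOrFail(Future<T> pending) {
        try {
          return pending.get(); // blocking wait; may throw InterruptedException
        } catch (InterruptedException ex) {
          // Restore the interrupt flag so callers still see the interruption.
          Thread.currentThread().interrupt();
          throw new IllegalStateException("Interrupted while waiting for result", ex);
        } catch (ExecutionException ex) {
          // Unwrap the asynchronous failure and rethrow its real cause.
          throw new IllegalStateException(ex.getCause());
        }
      }
    }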