
fix: SynchronizeRulesAndMetadataDao
Nolife999 committed Oct 3, 2023
1 parent 97d78c5 commit 72a691f
Showing 10 changed files with 264 additions and 222 deletions.
@@ -4,7 +4,7 @@

import fr.insee.arc.core.service.global.ApiService;
import fr.insee.arc.core.service.p0initialisation.filesystem.RestoreFileSystem;
-import fr.insee.arc.core.service.p0initialisation.metadata.SynchronizeUserRulesAndMetadata;
+import fr.insee.arc.core.service.p0initialisation.metadata.SynchronizeRulesAndMetadataOperation;
import fr.insee.arc.core.service.p0initialisation.pilotage.CleanPilotageOperation;
import fr.insee.arc.core.service.p0initialisation.pilotage.SynchronizeDataByPilotageOperation;
import fr.insee.arc.core.service.p0initialisation.useroperation.ReplayOrDeleteFilesOperation;
@@ -46,7 +46,7 @@ public void executer() throws ArcException {
// Copy/replace the user-defined rules (ihm_ tables) into the current
// execution environment
// update the business tables with the parameters of the norm family
-new SynchronizeUserRulesAndMetadata(this.coordinatorSandbox).synchroniserSchemaExecutionAllNods();
+new SynchronizeRulesAndMetadataOperation(this.coordinatorSandbox).synchroniserSchemaExecutionAllNods();

// mark the files or archives to replay
// delete the files from the pilotage table that the user marked as
@@ -11,9 +11,11 @@
import fr.insee.arc.utils.ressourceUtils.PropertiesHandler;

public class BuildFileSystem {
+
/**
-* Build the file system required for arc to proceed for a list of given sandboxes @param envExecutions
+* Build the file system required for arc to proceed for a list of given
+* sandboxes @param envExecutions
*
* @param connexion
* @param envExecutions
*/
@@ -24,12 +26,9 @@ public BuildFileSystem(Connection connexion, String[] envExecutions) {
}

private Connection connexion;

private String[] envExecutions;

-
-
-
-
/**
* Build directories for the sandbox
*
@@ -53,10 +52,10 @@ public void execute() {

FileUtilsArc.createDirIfNotexist(DirectoryPath
.directoryReceptionEtatEnCours(properties.getBatchParametersDirectory(), envExecution));
-FileUtilsArc.createDirIfNotexist(DirectoryPath
-		.directoryReceptionEtatOK(properties.getBatchParametersDirectory(), envExecution));
-FileUtilsArc.createDirIfNotexist(DirectoryPath
-		.directoryReceptionEtatKO(properties.getBatchParametersDirectory(), envExecution));
+FileUtilsArc.createDirIfNotexist(
+		DirectoryPath.directoryReceptionEtatOK(properties.getBatchParametersDirectory(), envExecution));
+FileUtilsArc.createDirIfNotexist(
+		DirectoryPath.directoryReceptionEtatKO(properties.getBatchParametersDirectory(), envExecution));
}

} catch (ArcException ex) {
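
For context, not part of this commit: a minimal usage sketch of BuildFileSystem under stated assumptions. The BuildFileSystem import path is assumed (the diff does not show its package; RestoreFileSystem lives in p0initialisation.filesystem), and the connection URL, credentials, and sandbox schema names are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;

// assumed package: the diff does not show where BuildFileSystem is declared
import fr.insee.arc.core.service.p0initialisation.filesystem.BuildFileSystem;

public class BuildFileSystemExample {
	public static void main(String[] args) throws Exception {
		// placeholder connection coordinates; replace with the real ARC database settings
		try (Connection connexion = DriverManager.getConnection(
				"jdbc:postgresql://localhost:5432/arc", "arc", "arc")) {
			// create the reception directories (encours, OK, KO) for two placeholder sandboxes
			new BuildFileSystem(connexion, new String[] { "arc_bas1", "arc_bas2" }).execute();
		}
	}
}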
@@ -0,0 +1,208 @@
package fr.insee.arc.core.service.p0initialisation.metadata;

import java.sql.Connection;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Optional;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import fr.insee.arc.core.service.global.bo.JeuDeRegle;
import fr.insee.arc.core.service.global.bo.JeuDeRegleDao;
import fr.insee.arc.core.service.global.bo.Sandbox;
import fr.insee.arc.core.service.global.scalability.ServiceScalability;
import fr.insee.arc.core.service.p0initialisation.dbmaintenance.BddPatcher;
import fr.insee.arc.core.service.p0initialisation.metadata.dao.SynchronizeRulesAndMetadataDao;
import fr.insee.arc.core.service.p5mapping.engine.ExpressionService;
import fr.insee.arc.utils.consumer.ThrowingConsumer;
import fr.insee.arc.utils.dao.CopyObjectsToDatabase;
import fr.insee.arc.utils.dao.UtilitaireDao;
import fr.insee.arc.utils.exception.ArcException;
import fr.insee.arc.utils.ressourceUtils.PropertiesHandler;
import fr.insee.arc.utils.structure.GenericBean;
import fr.insee.arc.utils.utils.LoggerHelper;

public class SynchronizeRulesAndMetadataOperation {

private static final Logger LOGGER = LogManager.getLogger(SynchronizeRulesAndMetadataOperation.class);

public SynchronizeRulesAndMetadataOperation(Sandbox sandbox) {
super();
this.sandbox = sandbox;
this.dao = new SynchronizeRulesAndMetadataDao(sandbox);
}

private Sandbox sandbox;

private SynchronizeRulesAndMetadataDao dao;

/**
* Copies/replaces the user-defined rules (ihm_ tables) into the execution
* environment and updates the schema of the business tables matching the
* rules defined in the norm families
*
* @throws ArcException
*/
public void synchroniserSchemaExecutionAllNods() throws ArcException {

copyMetadataAllNods();

mettreAJourSchemaTableMetierOnNods();
}

/**
* Copies/replaces the user-defined rules (ihm_ tables) into the current
* execution environment on all postgres nods (coordinator and executors)
*
* @throws ArcException
*/
public void copyMetadataAllNods() throws ArcException {

// on coordinator nod - copy the metadata user rules from metadata schema to sandbox schema
copyMetadataToSandbox();

// copy the rules in sandbox schema of the coordinator nod to the sandbox schema of the executor nods
copyMetadataToExecutorsAllNods();
}

/**
* Copy the tables from the parameter environment (IHM) to the execution
* environment (batch, bas, ...)
*
* @throws ArcException
*/
private void copyMetadataToSandbox() throws ArcException {
dao.copyRulesTablesToExecution();
applyExpressions();
}

/**
* Instantiate the metadata required on all executor pods
*
* @throws ArcException
*/
protected int copyMetadataToExecutorsAllNods() throws ArcException {

Connection coordinatorConnexion = sandbox.getConnection();
String envExecution = sandbox.getSchema();

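// nothing to copy on the coordinator nod: the rules are already in its sandbox schema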
ThrowingConsumer<Connection, ArcException> onCoordinator = c -> {
};

ThrowingConsumer<Connection, ArcException> onExecutor = executorConnection -> {
copyMetaDataToExecutors(coordinatorConnexion, executorConnection, envExecution);
};

return ServiceScalability.dispatchOnNods(coordinatorConnexion, onCoordinator, onExecutor);

}

/**
* Instantiate the metadata required on the given executor pod
*
* @param coordinatorConnexion
* @param executorConnection
* @param envExecution
* @throws ArcException
*/
private static void copyMetaDataToExecutors(Connection coordinatorConnexion, Connection executorConnection,
String envExecution) throws ArcException {
PropertiesHandler properties = PropertiesHandler.getInstance();

// add utility functions
BddPatcher.executeBddScript(executorConnection, "BdD/script_function_utility.sql",
properties.getDatabaseRestrictedUsername(), null, null);

// add tables for phases if required
BddPatcher.bddScriptEnvironmentExecutor(executorConnection, properties.getDatabaseRestrictedUsername(),
new String[] { envExecution });

// copy tables

ArrayList<String> tablesToCopyIntoExecutor = BddPatcher.retrieveRulesTablesFromSchema(coordinatorConnexion,
envExecution);
tablesToCopyIntoExecutor
.addAll(BddPatcher.retrieveExternalTablesUsedInRules(coordinatorConnexion, envExecution));
tablesToCopyIntoExecutor.addAll(BddPatcher.retrieveModelTablesFromSchema(coordinatorConnexion, envExecution));

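// deduplicate the table list, then copy each table's content from the coordinator to the executor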
for (String table : new HashSet<String>(tablesToCopyIntoExecutor)) {

GenericBean gb = SynchronizeRulesAndMetadataDao.execQuerySelectDataFrom(coordinatorConnexion, table);

CopyObjectsToDatabase.execCopyFromGenericBean(executorConnection, table, gb);
}
}

/**
* Replace expressions in the rules
*
* @throws ArcException
*/
private void applyExpressions() throws ArcException {

Connection connexion = sandbox.getConnection();
String anExecutionEnvironment = sandbox.getSchema();

// Checks expression validity
ExpressionService expressionService = new ExpressionService();
ArrayList<JeuDeRegle> allRuleSets = JeuDeRegleDao.recupJeuDeRegle(connexion,
anExecutionEnvironment + ".jeuderegle");
for (JeuDeRegle ruleSet : allRuleSets) {
// Check
GenericBean expressions = expressionService.fetchExpressions(connexion, anExecutionEnvironment, ruleSet);
if (expressions.isEmpty()) {
continue;
}

Optional<String> loopInExpressionSet = expressionService.loopInExpressionSet(expressions);
if (loopInExpressionSet.isPresent()) {
LoggerHelper.info(LOGGER, "A loop is present in the expression set : " + loopInExpressionSet.get());
LoggerHelper.info(LOGGER, "The expression set is not applied");
continue;
}

// Apply
expressions = expressionService.fetchOrderedExpressions(connexion, anExecutionEnvironment, ruleSet);
if (expressionService.isExpressionSyntaxPresentInControl(connexion, anExecutionEnvironment, ruleSet)) {
UtilitaireDao.get(0).executeRequest(connexion,
expressionService.applyExpressionsToControl(ruleSet, expressions, anExecutionEnvironment));
}
if (expressionService.isExpressionSyntaxPresentInMapping(connexion, anExecutionEnvironment, ruleSet)) {
UtilitaireDao.get(0).executeRequest(connexion,
expressionService.applyExpressionsToMapping(ruleSet, expressions, anExecutionEnvironment));
}
}

}

private void mettreAJourSchemaTableMetierOnNods() throws ArcException {

Connection coordinatorConnexion = sandbox.getConnection();
String envExecution = sandbox.getSchema();

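// the same business-table schema update runs on the coordinator and on every executor nod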
ThrowingConsumer<Connection, ArcException> function = executorConnection -> {
SynchronizeRulesAndMetadataDao.mettreAJourSchemaTableMetier(executorConnection, envExecution);
};

ServiceScalability.dispatchOnNods(coordinatorConnexion, function, function);

}

}
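
For context, not part of this commit: a minimal sketch of driving the new operation, mirroring the call site in the initialization service above. The Sandbox constructor shown is an assumption (the diff shows only that Sandbox exposes getConnection() and getSchema()); the connection URL, credentials, and schema name are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;

import fr.insee.arc.core.service.global.bo.Sandbox;
import fr.insee.arc.core.service.p0initialisation.metadata.SynchronizeRulesAndMetadataOperation;

public class SynchronizeMetadataExample {
	public static void main(String[] args) throws Exception {
		// placeholder connection coordinates; replace with the real ARC coordinator settings
		try (Connection coordinator = DriverManager.getConnection(
				"jdbc:postgresql://localhost:5432/arc", "arc", "arc")) {
			// hypothetical Sandbox construction: a coordinator connection plus the sandbox schema name
			Sandbox sandbox = new Sandbox(coordinator, "arc_bas1");

			// copies the ihm_ rule tables to the sandbox schema on every nod,
			// then updates the business-table schemas to match the rule families
			new SynchronizeRulesAndMetadataOperation(sandbox).synchroniserSchemaExecutionAllNods();
		}
	}
}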
