Skip to content

Commit

Permalink
refactor ApiInitialisation
Browse files Browse the repository at this point in the history
  • Loading branch information
Nolife999 committed Sep 25, 2023
1 parent e766062 commit 70e7ac6
Show file tree
Hide file tree
Showing 21 changed files with 210 additions and 211 deletions.
32 changes: 13 additions & 19 deletions arc-batch/src/main/java/fr/insee/arc/batch/BatchARC.java
Original file line number Diff line number Diff line change
Expand Up @@ -80,9 +80,6 @@ class BatchARC implements IReturnCode {
// reste à faire
private Integer numberOfIterationBewteenCheckTodo;

// nombre de pods utilisés par ARC
private Integer numberOfPods;

// true = the batch will resume the process from a formerly interrupted batch
// false = the batch will proceed to a new load
// Maintenance initialization process can only occur in this case
Expand Down Expand Up @@ -142,9 +139,6 @@ private void initParameters() {
numberOfIterationBewteenCheckTodo = bdParameters.getInt(null, "LanceurARC.DATABASE_CHECKTODO_ROUTINE_INTERVAL",
10);

// the number of executor nods declared for scalability
numberOfPods = ArcDatabase.numberOfExecutorNods();

// either we take env and envExecution from database or properties
// default is from properties
if (Boolean.parseBoolean(bdParameters.getString(null, "LanceurARC.envFromDatabase", "false"))) {
Expand Down Expand Up @@ -240,13 +234,10 @@ private void maintenanceTablePilotageBatch() throws ArcException {
requete.append(
"\n insert into arc.pilotage_batch select '1900-01-01:00','O' where not exists (select 1 from arc.pilotage_batch); ");
UtilitaireDao.get(ArcDatabase.COORDINATOR.getIndex()).executeRequest(null, requete);

for (int poolIndex = 0; poolIndex <= numberOfPods; poolIndex++) {
// Maintenance full du catalog
DatabaseMaintenance.maintenancePgCatalog(poolIndex, null, FormatSQL.VACUUM_OPTION_FULL);
// maintenance des tables métier de la base de données
DatabaseMaintenance.maintenanceDatabaseClassic(poolIndex, null, envExecution);
}

DatabaseMaintenance.maintenancePgCatalogAllNods(null, FormatSQL.VACUUM_OPTION_FULL);
DatabaseMaintenance.maintenancePilotage(null, envExecution, FormatSQL.VACUUM_OPTION_NONE);

}

/**
Expand Down Expand Up @@ -590,9 +581,9 @@ private void startPhaseThread()
for (TraitementPhase phase : phases) {
// if no thread in phase, start one
if (pool.get(phase).isEmpty()) {
PhaseThreadFactory a = new PhaseThreadFactory(mapParam, phase);
a.start();
pool.get(phase).add(a);
PhaseThreadFactory thread = new PhaseThreadFactory(mapParam, phase);
thread.start();
pool.get(phase).add(thread);
}
// delay between phases not to overload
Sleep.sleep(delay);
Expand All @@ -606,10 +597,13 @@ private void startMaintenanceThread() {
maintenance = new Thread() {
@Override
public void run() {
for (int poolIndex = 0; poolIndex <= numberOfPods; poolIndex++) {
DatabaseMaintenance.maintenanceDatabaseClassic(poolIndex, null,
envExecution);
try {
DatabaseMaintenance.maintenancePgCatalogAllNods(null, FormatSQL.VACUUM_OPTION_NONE);
DatabaseMaintenance.maintenancePilotage(null, envExecution, FormatSQL.VACUUM_OPTION_NONE);
} catch (ArcException e) {
e.logMessageException();
}

}
};
maintenance.start();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -696,6 +696,14 @@ public void setListeNorme(List<Norme> listeNorme) {
this.listeNorme = listeNorme;
}

/**
 * Returns the coordinator sandbox attached to this instance.
 *
 * @return the coordinator {@link Sandbox}
 */
public Sandbox getCoordinatorSandbox() {
	return this.coordinatorSandbox;
}

/**
 * Sets the coordinator sandbox attached to this instance.
 *
 * @param coordinatorSandbox the coordinator {@link Sandbox} to use
 */
public void setCoordinatorSandbox(Sandbox coordinatorSandbox) {
	this.coordinatorSandbox = coordinatorSandbox;
}



}
Original file line number Diff line number Diff line change
@@ -1,13 +1,16 @@
package fr.insee.arc.core.service.global.dao;

import java.sql.Connection;
import java.util.List;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import fr.insee.arc.core.dataobjects.ColumnEnum;
import fr.insee.arc.core.dataobjects.ViewEnum;
import fr.insee.arc.core.service.global.scalability.ServiceScalability;
import fr.insee.arc.core.util.StaticLoggerDispatcher;
import fr.insee.arc.utils.consumer.ThrowingConsumer;
import fr.insee.arc.utils.dao.UtilitaireDao;
import fr.insee.arc.utils.exception.ArcException;
import fr.insee.arc.utils.utils.FormatSQL;
Expand All @@ -20,69 +23,53 @@ private DatabaseMaintenance() {

protected static final Logger LOGGER = LogManager.getLogger(DatabaseMaintenance.class);


/**
 * Dispatches the postgres catalog maintenance (vacuum) on the coordinator and
 * on every declared executor nod.
 *
 * @param coordinatorConnection the jdbc connection to the coordinator; may be
 *                              null (the dispatch routine then opens one)
 * @param type                  the vacuum option, e.g.
 *                              FormatSQL.VACUUM_OPTION_FULL or
 *                              FormatSQL.VACUUM_OPTION_NONE
 * @throws ArcException if the maintenance fails on any nod
 */
public static void maintenancePgCatalogAllNods(Connection coordinatorConnection, String type) throws ArcException {

// the same action is applied to the coordinator and to each executor connection
ThrowingConsumer<Connection, ArcException> function = executorConnection -> UtilitaireDao.get(0).maintenancePgCatalog(executorConnection, type);

ServiceScalability.dispatchOnNods(coordinatorConnection, function, function);

}

/**
* Maintenance sur la table de pilotage
*
* @param connexion
* @param envExecution
* @param type
*/
private static void maintenancePilotage(Integer poolIndex, Connection connexion, String envExecution, String type) {
String tablePil = ViewEnum.PILOTAGE_FICHIER.getFullName(envExecution);
public static void maintenancePilotage(Connection coordinatorConnection, String envExecution, String type) {
StaticLoggerDispatcher.info(LOGGER, "** Maintenance Pilotage **");

String tablePil = ViewEnum.PILOTAGE_FICHIER.getFullName(envExecution);

try {
UtilitaireDao.get(poolIndex).executeImmediate(connexion, FormatSQL.analyzeSecured(tablePil));
UtilitaireDao.get(poolIndex).executeImmediate(connexion, FormatSQL.vacuumSecured(tablePil, type));
UtilitaireDao.get(0).executeImmediate(coordinatorConnection, FormatSQL.vacuumSecured(tablePil, type));
UtilitaireDao.get(0).executeImmediate(coordinatorConnection, FormatSQL.analyzeSecured(tablePil));
} catch (Exception e) {
StaticLoggerDispatcher.error(LOGGER, "Error in ApiService.maintenancePilotage");
}
}

/**
 * Runs the vacuum maintenance on the postgres catalog tables of the given nod.
 *
 * @param poolIndex the connection pool index of the target nod
 * @param connexion the jdbc connexion; may be null
 * @param type      the vacuum option (FormatSQL.VACUUM_OPTION_FULL or
 *                  FormatSQL.VACUUM_OPTION_NONE)
 */
public static void maintenancePgCatalog(Integer poolIndex, Connection connexion, String type) {
// postgres reclaims space poorly on the catalog tables when too many
// operations are performed on columns
// a full vacuum is needed, otherwise almost nothing is reclaimed...
StaticLoggerDispatcher.info(LOGGER, "** Maintenance Catalogue **");
UtilitaireDao.get(poolIndex).maintenancePgCatalog(connexion, type);
}

/**
 * Classic database maintenance routine on the default nod (pool index 0):
 * vacuum of the postgres catalog tables followed by a vacuum of the pilotage
 * table located in the sandbox schema.
 *
 * @param connexion    the jdbc connexion
 * @param envExecution the sandbox schema
 */
public static void maintenanceDatabaseClassic(Connection connexion, String envExecution) {
maintenanceDatabaseClassic(0, connexion, envExecution);
}

/**
 * Classic database maintenance routine on the given nod, using the plain
 * (non-FULL) vacuum option.
 *
 * @param poolIndex    the connection pool index of the target nod
 * @param connexion    the jdbc connexion
 * @param envExecution the sandbox schema
 */
public static void maintenanceDatabaseClassic(Integer poolIndex, Connection connexion, String envExecution) {
maintenanceDatabase(poolIndex, connexion, envExecution, FormatSQL.VACUUM_OPTION_NONE);
}

/**
* analyze and vacuum on postgres catalog tables analyze and vacuum on the
* pilotage table located in the sandbox schema
*
* @param connexion the jdbc connexion
* @param envExecution the sandbox schema
* @param typeMaintenance FormatSQL.VACUUM_OPTION_FULL or
* FormatSQL.VACUUM_OPTION_NONE
*/
private static void maintenanceDatabase(Integer poolIndex, Connection connexion, String envExecution,
String typeMaintenance) {
maintenancePgCatalog(poolIndex, connexion, typeMaintenance);

maintenancePilotage(poolIndex, connexion, envExecution, typeMaintenance);

StaticLoggerDispatcher.info(LOGGER, "** Fin de maintenance **");
/**
 * Classic database maintenance routine: vacuum of the postgres catalog tables
 * on every nod, then vacuum of the pilotage table located in the sandbox
 * schema, both with the plain (non-FULL) option.
 *
 * @param coordinatorConnection the jdbc connexion to the coordinator
 * @param envExecution          the sandbox schema
 * @throws ArcException if the catalog maintenance fails on any nod
 */
public static void maintenanceDatabaseClassic(Connection coordinatorConnection, String envExecution) throws ArcException {

maintenancePgCatalogAllNods(coordinatorConnection, FormatSQL.VACUUM_OPTION_NONE);

maintenancePilotage(coordinatorConnection, envExecution, FormatSQL.VACUUM_OPTION_NONE);
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,23 @@ private ServiceScalability() {
*/
public static int dispatchOnNods(Connection coordinatorConnexion, ThrowingConsumer<Connection, ArcException> actionOnCoordinator, ThrowingConsumer<Connection, ArcException> actionOnExecutor) throws ArcException
{
actionOnCoordinator.accept(coordinatorConnexion);
if (coordinatorConnexion==null)
{
try (Connection newCoordinatorConnexion = UtilitaireDao.get(ArcDatabase.COORDINATOR.getIndex()).getDriverConnexion())
{
actionOnCoordinator.accept(newCoordinatorConnexion);

} catch (SQLException | ArcException e) {
ArcException customException = new ArcException(e, ArcExceptionMessage.DATABASE_INITIALISATION_SCRIPT_FAILED);
customException.logFullException();
throw customException;
}
}
else
{
actionOnCoordinator.accept(coordinatorConnexion);
}


int numberOfExecutorNods = ArcDatabase.numberOfExecutorNods();

Expand Down
Original file line number Diff line number Diff line change
@@ -1,33 +1,14 @@
package fr.insee.arc.core.service.p0initialisation;

import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.springframework.stereotype.Component;

import fr.insee.arc.core.dataobjects.ArcPreparedStatementBuilder;
import fr.insee.arc.core.model.TraitementEtat;
import fr.insee.arc.core.model.TraitementPhase;
import fr.insee.arc.core.service.global.ApiService;
import fr.insee.arc.core.service.global.dao.DatabaseMaintenance;
import fr.insee.arc.core.service.global.dao.FileSystemManagement;
import fr.insee.arc.core.service.global.dao.TableNaming;
import fr.insee.arc.core.service.p0initialisation.filesystem.RestoreFileSystem;
import fr.insee.arc.core.service.p0initialisation.metadata.SynchronizeUserRulesAndMetadata;
import fr.insee.arc.core.service.p0initialisation.pilotage.CleanPilotage;
import fr.insee.arc.core.service.p0initialisation.pilotage.SynchronizeDataByPilotage;
import fr.insee.arc.core.service.p0initialisation.useroperation.ReplayOrDeleteFiles;
import fr.insee.arc.core.service.p0initialisation.useroperation.ResetEnvironmentOperation;
import fr.insee.arc.core.service.p1reception.ApiReceptionService;
import fr.insee.arc.utils.dao.UtilitaireDao;
import fr.insee.arc.utils.exception.ArcException;
import fr.insee.arc.utils.files.FileUtilsArc;
import fr.insee.arc.utils.structure.GenericBean;
import fr.insee.arc.utils.utils.FormatSQL;
import fr.insee.arc.utils.utils.LoggerHelper;

/**
* ApiNormageService
Expand All @@ -49,8 +30,6 @@ public ApiInitialisationService() {
super();
}

private static final Logger LOGGER = LogManager.getLogger(ApiInitialisationService.class);

public ApiInitialisationService(String aCurrentPhase, String aEnvExecution, String aDirectoryRoot, Integer aNbEnr,
String paramBatch) {
super(aCurrentPhase, aEnvExecution, aDirectoryRoot, aNbEnr, paramBatch);
Expand All @@ -70,7 +49,8 @@ public void executer() throws ArcException {
new SynchronizeUserRulesAndMetadata(this.coordinatorSandbox).synchroniserSchemaExecutionAllNods();

// marque les fichiers ou les archives à rejouer
// efface des fichiers de la table de pilotage marqués par l'utilisateur comme étant à effacer
// efface des fichiers de la table de pilotage marqués par l'utilisateur comme
// étant à effacer
new ReplayOrDeleteFiles(this.coordinatorSandbox).processMarkedFiles();

// Met en cohérence les table de données avec la table de pilotage de
Expand All @@ -83,47 +63,4 @@ public void executer() throws ArcException {

}

/**
 * Rolls the sandbox back to a previous phase by delegating to
 * {@link ResetEnvironmentOperation} on the coordinator sandbox.
 *
 * @param phase          the phase to return to
 * @param querySelection presumably selects the files targeted by the rollback
 *                       — TODO confirm against ResetEnvironmentOperation
 * @param listEtat       the file states concerned by the rollback
 * @throws ArcException if the rollback fails
 */
public void retourPhasePrecedente(TraitementPhase phase, ArcPreparedStatementBuilder querySelection,
List<TraitementEtat> listEtat) throws ArcException {
new ResetEnvironmentOperation(this.coordinatorSandbox).retourPhasePrecedente(phase, querySelection, listEtat);
}



/**
 * Resets the environment: synchronizes the data tables with the pilotage
 * table, then runs the classic database maintenance.
 * Best effort: any failure is caught and logged, never propagated.
 */
public void resetEnvironnement() {
try {
new SynchronizeDataByPilotage(this.coordinatorSandbox).synchronizeDataByPilotage();
DatabaseMaintenance.maintenanceDatabaseClassic(connexion.getCoordinatorConnection(), envExecution);
} catch (Exception e) {
LoggerHelper.error(LOGGER, e);
}
}

/**
 * Clears the pilotage tables of the given environment and recreates its
 * reception and export directories from scratch.
 *
 * @param repertoire the root directory of the file system
 * @param env        the environment (sandbox) identifier
 * @throws ArcException if a database request fails
 */
public static void clearPilotageAndDirectories(String repertoire, String env) throws ArcException {
	// empty the pilotage tables (pool index 0 = coordinator nod)
	UtilitaireDao.get(0).executeBlock(null, "truncate " + TableNaming.dbEnv(env) + "pilotage_fichier; ");
	UtilitaireDao.get(0).executeBlock(null, "truncate " + TableNaming.dbEnv(env) + "pilotage_archive; ");

	// recreate the reception/archive directories of each registered entrepot, if any
	if (Boolean.TRUE.equals(UtilitaireDao.get(0).hasResults(null, FormatSQL.tableExists("arc.ihm_entrepot")))) {
		// program to the List interface rather than the ArrayList implementation
		List<String> entrepotList = new GenericBean(UtilitaireDao.get(0).executeRequest(null,
				new ArcPreparedStatementBuilder("select id_entrepot from arc.ihm_entrepot"))).mapContent()
				.get("id_entrepot");
		if (entrepotList != null) {
			for (String entrepot : entrepotList) {
				FileUtilsArc.deleteAndRecreateDirectory(
						Paths.get(ApiReceptionService.directoryReceptionEntrepot(repertoire, env, entrepot)).toFile());
				FileUtilsArc.deleteAndRecreateDirectory(Paths
						.get(ApiReceptionService.directoryReceptionEntrepotArchive(repertoire, env, entrepot)).toFile());
			}
		}
	}
	// recreate the reception state directories (en cours / OK / KO) and the export directory
	FileUtilsArc.deleteAndRecreateDirectory(
			Paths.get(ApiReceptionService.directoryReceptionEtatEnCours(repertoire, env)).toFile());
	FileUtilsArc.deleteAndRecreateDirectory(
			Paths.get(ApiReceptionService.directoryReceptionEtatOK(repertoire, env)).toFile());
	FileUtilsArc.deleteAndRecreateDirectory(
			Paths.get(ApiReceptionService.directoryReceptionEtatKO(repertoire, env)).toFile());
	FileUtilsArc.deleteAndRecreateDirectory(
			Paths.get(FileSystemManagement.directoryEnvExport(repertoire, env)).toFile());
}

}
Loading

0 comments on commit 70e7ac6

Please sign in to comment.