diff --git a/.gitignore b/.gitignore index 415b3bf64..f11c4af3c 100644 --- a/.gitignore +++ b/.gitignore @@ -43,4 +43,6 @@ datavault-webapp/pids # ignore intellij run files .run/ TEMPLATES/* -dv5/local-db/docker/backup.D.SPEED.sql \ No newline at end of file +dv5/local-db/docker/backup.D.SPEED.sql +# this can set the java version for the Intellij IDE and will set java versions for terminals too if 'sdk config set sdkman_auto_env true' +.sdkmanrc \ No newline at end of file diff --git a/datavault-broker/src/main/java/org/datavaultplatform/broker/app/DataVaultBrokerApp.java b/datavault-broker/src/main/java/org/datavaultplatform/broker/app/DataVaultBrokerApp.java index a47c078a5..71683bf80 100644 --- a/datavault-broker/src/main/java/org/datavaultplatform/broker/app/DataVaultBrokerApp.java +++ b/datavault-broker/src/main/java/org/datavaultplatform/broker/app/DataVaultBrokerApp.java @@ -55,9 +55,9 @@ JacksonConfig.class, PropertiesConfig.class, EncryptionConfig.class, ActuatorConfig.class, ScheduleConfig.class, InitialiseConfig.class, SecurityActuatorConfig.class, SecurityConfig.class, ControllerConfig.class, - ServiceConfig.class, DatabaseConfig.class, + DatabaseConfig.class, LdapConfig.class, EmailConfig.class, EmailLocalConfig.class, RabbitConfig.class, - StorageClassNameResolverConfig.class, WebConfig.class + StorageClassNameResolverConfig.class, WebConfig.class, ServiceConfig.class }) @Slf4j //@EnableJSONDoc diff --git a/datavault-broker/src/main/java/org/datavaultplatform/broker/controllers/admin/AdminController.java b/datavault-broker/src/main/java/org/datavaultplatform/broker/controllers/admin/AdminController.java index 693f27844..5e9820cd9 100644 --- a/datavault-broker/src/main/java/org/datavaultplatform/broker/controllers/admin/AdminController.java +++ b/datavault-broker/src/main/java/org/datavaultplatform/broker/controllers/admin/AdminController.java @@ -10,6 +10,7 @@ import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; import org.datavaultplatform.broker.queue.Sender; +import org.datavaultplatform.broker.services.AdminDepositService; import org.datavaultplatform.broker.services.*; import org.datavaultplatform.common.PropNames; import org.datavaultplatform.common.event.Event; @@ -17,7 +18,6 @@ import org.datavaultplatform.common.model.*; import org.datavaultplatform.common.response.*; -import org.datavaultplatform.common.task.Task; import org.jsondoc.core.annotation.Api; import org.jsondoc.core.annotation.ApiHeader; import org.jsondoc.core.annotation.ApiHeaders; @@ -33,8 +33,6 @@ import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.*; -import com.fasterxml.jackson.databind.ObjectMapper; - import jakarta.servlet.http.HttpServletRequest; /** @@ -60,6 +58,7 @@ public class AdminController { private final ExternalMetadataService externalMetadataService; private final AuditsService auditsService; private final RolesAndPermissionsService permissionsService; + private final AdminDepositService adminDepositService; private final Sender sender; private final String optionsDir; private final String tempDir; @@ -73,7 +72,8 @@ public AdminController(VaultsService vaultsService, UsersService usersService, DepositsService depositsService, RetrievesService retrievesService, EventService eventService, ArchiveStoreService archiveStoreService, JobsService jobsService, ExternalMetadataService externalMetadataService, AuditsService auditsService, - RolesAndPermissionsService permissionsService, Sender sender, + 
RolesAndPermissionsService permissionsService, AdminDepositService adminDepositService, + Sender sender, @Value("${optionsDir:#{null}}") String optionsDir, @Value("${tempDir:#{null}}") String tempDir, @Value("${s3.bucketName:#{null}}") String bucketName, @@ -90,6 +90,7 @@ public AdminController(VaultsService vaultsService, UsersService usersService, this.externalMetadataService = externalMetadataService; this.auditsService = auditsService; this.permissionsService = permissionsService; + this.adminDepositService = adminDepositService; this.sender = sender; this.optionsDir = optionsDir; this.tempDir = tempDir; @@ -356,61 +357,10 @@ public ResponseEntity deleteDeposit(@RequestHeader(HEADER_USER_ID) Strin if (user == null) { throw new Exception("User '" + userID + "' does not exist"); } - - List jobs = deposit.getJobs(); - for (Job job : jobs) { - if (job.isError() == false && job.getState() != job.getStates().size() - 1) { - // There's an in-progress job for this deposit - throw new IllegalArgumentException("Job in-progress for this Deposit"); - } - } - - List archiveStores = archiveStoreService.getArchiveStores(); - if (archiveStores.isEmpty()) { - throw new Exception("No configured archive storage"); - } - LOGGER.info("Delete deposit archiveStores : {}", archiveStores); - archiveStores = this.addArchiveSpecificOptions(archiveStores); - - // Create a job to track this delete - Job job = new Job("org.datavaultplatform.worker.tasks.Delete"); - jobsService.addJob(deposit, job); - - // Ask the worker to process the data delete - try { - HashMap deleteProperties = new HashMap<>(); - deleteProperties.put(PropNames.DEPOSIT_ID, deposit.getID()); - deleteProperties.put(PropNames.BAG_ID, deposit.getBagId()); - deleteProperties.put(PropNames.ARCHIVE_SIZE, Long.toString(deposit.getArchiveSize())); - deleteProperties.put(PropNames.USER_ID, user.getID()); - deleteProperties.put(PropNames.NUM_OF_CHUNKS, Integer.toString(deposit.getNumOfChunks())); - for (Archive archive : deposit.getArchives()) { - deleteProperties.put(archive.getArchiveStore().getID(), archive.getArchiveId()); - } - - // Add a single entry for the user file storage - Map userFileStoreClasses = new HashMap<>(); - Map> userFileStoreProperties = new HashMap<>(); - //userFileStoreClasses.put(storageID, userStore.getStorageClass()); - //userFileStoreProperties.put(storageID, userStore.getProperties()); - - - Task deleteTask = new Task( - job, deleteProperties, archiveStores, - userFileStoreProperties, userFileStoreClasses, - null, null, - null, - null, null, - null, null, null); - ObjectMapper mapper = new ObjectMapper(); - String jsonDelete = mapper.writeValueAsString(deleteTask); - sender.send(jsonDelete); - } catch (Exception e) { - LOGGER.error("Exception while deleting a deposit", e); - } + adminDepositService.deleteDeposit(deposit, user); return new ResponseEntity<>(HttpStatus.OK); - } + private List addArchiveSpecificOptions(List archiveStores) { if (archiveStores != null && ! 
archiveStores.isEmpty()) { for (ArchiveStore archiveStore : archiveStores) { diff --git a/datavault-broker/src/main/java/org/datavaultplatform/broker/queue/EventListener.java b/datavault-broker/src/main/java/org/datavaultplatform/broker/queue/EventListener.java index 2f6b204f3..471d28eaa 100644 --- a/datavault-broker/src/main/java/org/datavaultplatform/broker/queue/EventListener.java +++ b/datavault-broker/src/main/java/org/datavaultplatform/broker/queue/EventListener.java @@ -35,6 +35,7 @@ import org.datavaultplatform.common.event.audit.ChunkAuditStarted; import org.datavaultplatform.common.event.delete.DeleteComplete; import org.datavaultplatform.common.event.delete.DeleteStart; +import org.datavaultplatform.common.event.delete.DeletedChunk; import org.datavaultplatform.common.event.deposit.ChunksDigestEvent; import org.datavaultplatform.common.event.deposit.Complete; import org.datavaultplatform.common.event.deposit.CompleteCopyUpload; @@ -59,6 +60,7 @@ import org.datavaultplatform.common.model.Retrieve; import org.datavaultplatform.common.model.User; import org.datavaultplatform.common.model.Vault; +import org.datavaultplatform.common.model.Archive; import org.springframework.amqp.core.Message; import org.springframework.amqp.core.MessageListener; import org.springframework.amqp.rabbit.annotation.RabbitListener; @@ -364,6 +366,8 @@ void processEvent(String messageBody, Event event, Deposit deposit, Job job) process28UploadedToUserStore(uploadedToUserStore); } else if (event instanceof UserStoreSpaceAvailableChecked userStoreSpaceAvailableChecked ){ process29UserStoreSpaceAvailableChecked(userStoreSpaceAvailableChecked); + } else if (event instanceof DeletedChunk deletedChunk ){ + process30DeletedChunk(deletedChunk); } else { throw new Exception( String.format("Failed to process unknown Event class[%s]message[%s]", event.getClass(), @@ -881,6 +885,18 @@ protected void process29UserStoreSpaceAvailableChecked(UserStoreSpaceAvailableCh ignore(event); } + protected void process30DeletedChunk(DeletedChunk deletedChunk) { + processDeposit(deletedChunk.getDeposit(), $deposit -> { + String archiveId = deletedChunk.getArchiveId(); + if (archiveId != null) { + Archive archive = archivesService.getArchiveByArchiveId(archiveId); + if (archive != null) { + deletedChunk.setArchive(archive); + } + } + }); + } + String getUserSubject(String type) { String userSubjectKey = USER_DEPOSIT_PREFIX + type; log.info("User Subject key: {}", userSubjectKey); diff --git a/datavault-broker/src/main/java/org/datavaultplatform/broker/scheduled/CheckForDelete.java b/datavault-broker/src/main/java/org/datavaultplatform/broker/scheduled/CheckForDelete.java index cadd6305a..510109e57 100644 --- a/datavault-broker/src/main/java/org/datavaultplatform/broker/scheduled/CheckForDelete.java +++ b/datavault-broker/src/main/java/org/datavaultplatform/broker/scheduled/CheckForDelete.java @@ -1,11 +1,8 @@ package org.datavaultplatform.broker.scheduled; -import com.fasterxml.jackson.databind.ObjectMapper; -import org.datavaultplatform.broker.queue.Sender; +import org.datavaultplatform.broker.services.AdminDepositService; import org.datavaultplatform.broker.services.*; -import org.datavaultplatform.common.PropNames; import org.datavaultplatform.common.model.*; -import org.datavaultplatform.common.task.Task; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.scheduling.annotation.Scheduled; @@ -28,27 +25,17 @@ public class CheckForDelete implements ScheduledTask { private static final Logger log = 
LoggerFactory.getLogger(CheckForDelete.class); private final VaultsService vaultsService; - private final VaultsReviewService vaultsReviewService; + private final DepositsReviewService depositsReviewService; - private final ArchiveStoreService archiveStoreService; - private final RolesAndPermissionsService rolesAndPermissionsService; - private final UsersService usersService; - private final JobsService jobsService; - private final Sender sender; + private final AdminDepositService adminDepositService; @Autowired - public CheckForDelete(VaultsService vaultsService, VaultsReviewService vaultsReviewService, - DepositsReviewService depositsReviewService, ArchiveStoreService archiveStoreService, - RolesAndPermissionsService rolesAndPermissionsService, UsersService usersService, - JobsService jobsService, Sender sender) { + public CheckForDelete(VaultsService vaultsService, + DepositsReviewService depositsReviewService, + AdminDepositService adminDepositService) { this.vaultsService = vaultsService; - this.vaultsReviewService = vaultsReviewService; this.depositsReviewService = depositsReviewService; - this.archiveStoreService = archiveStoreService; - this.rolesAndPermissionsService = rolesAndPermissionsService; - this.usersService = usersService; - this.jobsService = jobsService; - this.sender = sender; + this.adminDepositService = adminDepositService; } @Override @@ -114,57 +101,6 @@ public void execute() throws Exception { // todo : move this method to a service class private void deleteDeposit(Deposit deposit) throws Exception { - log.info("Delete deposit with name " + deposit.getName()); - - List jobs = deposit.getJobs(); - for (Job job : jobs) { - if (job.isError() == false && job.getState() != job.getStates().size() - 1) { - // There's an in-progress job for this deposit - throw new IllegalArgumentException("Job in-progress for this Deposit"); - } - } - - List archiveStores = archiveStoreService.getArchiveStores(); - if (archiveStores.isEmpty()) { - throw new Exception("No configured archive storage"); - } - - log.info("Delete deposit archiveStores : {}", archiveStores); - archiveStores = archiveStoreService.addArchiveSpecificOptions(archiveStores); - - // Create a job to track this delete - Job job = new Job("org.datavaultplatform.worker.tasks.Delete"); - jobsService.addJob(deposit, job); - - // Ask the worker to process the data delete - - HashMap deleteProperties = new HashMap<>(); - deleteProperties.put(PropNames.DEPOSIT_ID, deposit.getID()); - deleteProperties.put(PropNames.BAG_ID, deposit.getBagId()); - deleteProperties.put(PropNames.ARCHIVE_SIZE, Long.toString(deposit.getArchiveSize())); - // We have no record of who requested the delete, is that acceptable? 
- deleteProperties.put(PropNames.USER_ID, null); - deleteProperties.put(PropNames.NUM_OF_CHUNKS, Integer.toString(deposit.getNumOfChunks())); - for (Archive archive : deposit.getArchives()) { - deleteProperties.put(archive.getArchiveStore().getID(), archive.getArchiveId()); - } - - // Add a single entry for the user file storage - Map userFileStoreClasses = new HashMap<>(); - Map> userFileStoreProperties = new HashMap<>(); - //userFileStoreClasses.put(storageID, userStore.getStorageClass()); - //userFileStoreProperties.put(storageID, userStore.getProperties()); - - Task deleteTask = new Task( - job, deleteProperties, archiveStores, - userFileStoreProperties, userFileStoreClasses, - null, null, - null, - null, null, - null, null, null); - ObjectMapper mapper = new ObjectMapper(); - String jsonDelete = mapper.writeValueAsString(deleteTask); - sender.send(jsonDelete); - + adminDepositService.deleteDeposit(deposit, null); } } diff --git a/datavault-broker/src/main/java/org/datavaultplatform/broker/services/AdminDepositService.java b/datavault-broker/src/main/java/org/datavaultplatform/broker/services/AdminDepositService.java new file mode 100644 index 000000000..a5fa3edf5 --- /dev/null +++ b/datavault-broker/src/main/java/org/datavaultplatform/broker/services/AdminDepositService.java @@ -0,0 +1,101 @@ +package org.datavaultplatform.broker.services; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.datavaultplatform.broker.queue.Sender; +import org.datavaultplatform.common.PropNames; +import org.datavaultplatform.common.model.*; +import org.datavaultplatform.common.task.Task; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Value; + +import org.springframework.boot.autoconfigure.condition.ConditionalOnBean; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +@Service +@Transactional +@ConditionalOnBean(Sender.class) +public class AdminDepositService { + + private static final Logger LOG = LoggerFactory.getLogger(AdminDepositService.class); + + private final ArchiveStoreService archiveStoreService; + private final JobsService jobsService; + private final Sender sender; + private final boolean workersSendDeletedChunkEvents; + + public AdminDepositService(ArchiveStoreService archiveStoreService, + JobsService jobsService, Sender sender, + @Value("${workers.send.deleted.chunk.events:false}") boolean workersSendDeletedChunkEvents) { + this.archiveStoreService = archiveStoreService; + this.jobsService = jobsService; + this.sender = sender; + this.workersSendDeletedChunkEvents = workersSendDeletedChunkEvents; + } + + public void deleteDeposit(Deposit deposit, User user) throws Exception { + final String userId = user == null ? 
null : user.getID();
+        LOG.info("Delete deposit with name [{}] userId[{}]", deposit.getName(), userId);
+
+        List<Job> jobs = deposit.getJobs();
+        for (Job job : jobs) {
+            if (job.isError() == false && job.getState() != job.getStates().size() - 1) {
+                // There's an in-progress job for this deposit
+                throw new IllegalArgumentException("Job in-progress for this Deposit");
+            }
+        }
+
+        List<ArchiveStore> archiveStores = archiveStoreService.getArchiveStores();
+        if (archiveStores.isEmpty()) {
+            throw new Exception("No configured archive storage");
+        }
+
+        LOG.info("Delete deposit archiveStores : {}", archiveStores);
+        archiveStores = archiveStoreService.addArchiveSpecificOptions(archiveStores);
+
+        // Create a job to track this delete
+        Job job = new Job("org.datavaultplatform.worker.tasks.Delete");
+        jobsService.addJob(deposit, job);
+
+        // Ask the worker to process the data delete
+        try {
+
+            HashMap<String, String> deleteProperties = new HashMap<>();
+            deleteProperties.put(PropNames.DEPOSIT_ID, deposit.getID());
+            deleteProperties.put(PropNames.BAG_ID, deposit.getBagId());
+            deleteProperties.put(PropNames.ARCHIVE_SIZE, Long.toString(deposit.getArchiveSize()));
+            // NOTE : for scheduled deletes, the userId will be null
+            deleteProperties.put(PropNames.USER_ID, userId);
+            deleteProperties.put(PropNames.NUM_OF_CHUNKS, Integer.toString(deposit.getNumOfChunks()));
+            for (Archive archive : deposit.getArchives()) {
+                deleteProperties.put(archive.getArchiveStore().getID(), archive.getArchiveId());
+            }
+            deleteProperties.put(PropNames.WORKERS_SEND_DELETED_CHUNK_EVENTS,
+                Boolean.toString(workersSendDeletedChunkEvents));
+
+            // Add a single entry for the user file storage
+            Map<String, String> userFileStoreClasses = new HashMap<>();
+            Map<String, Map<String, String>> userFileStoreProperties = new HashMap<>();
+            //userFileStoreClasses.put(storageID, userStore.getStorageClass());
+            //userFileStoreProperties.put(storageID, userStore.getProperties());
+
+            Task deleteTask = new Task(
+                job, deleteProperties, archiveStores,
+                userFileStoreProperties, userFileStoreClasses,
+                null, null,
+                null,
+                null, null,
+                null, null, null);
+            ObjectMapper mapper = new ObjectMapper();
+            String jsonDelete = mapper.writeValueAsString(deleteTask);
+            sender.send(jsonDelete);
+        } catch (Exception e) {
+            LOG.error("Exception while deleting a deposit", e);
+        }
+    }
+}
diff --git a/datavault-broker/src/main/java/org/datavaultplatform/broker/services/ArchivesService.java b/datavault-broker/src/main/java/org/datavaultplatform/broker/services/ArchivesService.java
index 05bbedc28..ce152e7e8 100644
--- a/datavault-broker/src/main/java/org/datavaultplatform/broker/services/ArchivesService.java
+++ b/datavault-broker/src/main/java/org/datavaultplatform/broker/services/ArchivesService.java
@@ -29,8 +29,12 @@ public List<Archive> getArchives() {
         return archiveDAO.list();
     }
 
-    public Archive getArchive(String archiveId) {
-        return archiveDAO.findById(archiveId).orElse(null);
+    public Archive getArchive(String id) {
+        return archiveDAO.findById(id).orElse(null);
+    }
+
+    public Archive getArchiveByArchiveId(String archiveId) {
+        return archiveDAO.findByArchiveId(archiveId).orElse(null);
     }
 
     public void addArchive(Deposit deposit, ArchiveStore archiveStore, String archiveId) {
diff --git a/datavault-broker/src/test/java/org/datavaultplatform/broker/actuator/ActuatorTest.java b/datavault-broker/src/test/java/org/datavaultplatform/broker/actuator/ActuatorTest.java
index 22a46c39b..8e381ff82 100644
--- a/datavault-broker/src/test/java/org/datavaultplatform/broker/actuator/ActuatorTest.java
+++
b/datavault-broker/src/test/java/org/datavaultplatform/broker/actuator/ActuatorTest.java @@ -4,6 +4,7 @@ import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; import org.datavaultplatform.broker.app.DataVaultBrokerApp; +import org.datavaultplatform.broker.services.AdminDepositService; import org.datavaultplatform.broker.queue.Sender; import org.datavaultplatform.broker.services.FileStoreService; import org.datavaultplatform.broker.test.AddTestProperties; @@ -59,6 +60,9 @@ public class ActuatorTest extends BaseDatabaseTest { @MockBean FileStoreService mFileStoreService; + @MockBean + AdminDepositService mAdminDepositService; + @Test void setup() { when(mFileStoreService.getFileStores()).thenReturn(Collections.emptyList()); diff --git a/datavault-broker/src/test/java/org/datavaultplatform/broker/actuator/OpenApiBrokerTest.java b/datavault-broker/src/test/java/org/datavaultplatform/broker/actuator/OpenApiBrokerTest.java index d98f5c73f..39394f119 100644 --- a/datavault-broker/src/test/java/org/datavaultplatform/broker/actuator/OpenApiBrokerTest.java +++ b/datavault-broker/src/test/java/org/datavaultplatform/broker/actuator/OpenApiBrokerTest.java @@ -3,6 +3,7 @@ import io.swagger.v3.oas.models.OpenAPI; import lombok.extern.slf4j.Slf4j; import org.datavaultplatform.broker.app.DataVaultBrokerApp; +import org.datavaultplatform.broker.services.AdminDepositService; import org.datavaultplatform.broker.queue.Sender; import org.datavaultplatform.broker.services.FileStoreService; import org.datavaultplatform.broker.test.AddTestProperties; @@ -46,6 +47,9 @@ public class OpenApiBrokerTest extends BaseDatabaseTest { @MockBean FileStoreService mFileStoreService; + + @MockBean + AdminDepositService mAdminDepositService; @Autowired OpenAPI openApi; diff --git a/datavault-broker/src/test/java/org/datavaultplatform/broker/authentication/FileStoreControllerIT.java b/datavault-broker/src/test/java/org/datavaultplatform/broker/authentication/FileStoreControllerIT.java index 51ae5dfc9..94f3c1211 100644 --- a/datavault-broker/src/test/java/org/datavaultplatform/broker/authentication/FileStoreControllerIT.java +++ b/datavault-broker/src/test/java/org/datavaultplatform/broker/authentication/FileStoreControllerIT.java @@ -32,6 +32,7 @@ import org.datavaultplatform.broker.actuator.SftpFileStoreEndpoint; import org.datavaultplatform.broker.actuator.SftpFileStoreInfo; import org.datavaultplatform.broker.app.DataVaultBrokerApp; +import org.datavaultplatform.broker.services.AdminDepositService; import org.datavaultplatform.broker.queue.Sender; import org.datavaultplatform.broker.test.AddTestProperties; import org.datavaultplatform.broker.test.BaseDatabaseTest; @@ -104,6 +105,8 @@ public class FileStoreControllerIT extends BaseDatabaseTest { String passphrase; @MockBean Sender sender; + @MockBean + AdminDepositService adminDepositService; @Autowired MockMvc mvc; @Autowired diff --git a/datavault-broker/src/test/java/org/datavaultplatform/broker/config/InitialiseBeansConfigIT.java b/datavault-broker/src/test/java/org/datavaultplatform/broker/config/InitialiseBeansConfigIT.java new file mode 100644 index 000000000..eca83c9bd --- /dev/null +++ b/datavault-broker/src/test/java/org/datavaultplatform/broker/config/InitialiseBeansConfigIT.java @@ -0,0 +1,117 @@ +package org.datavaultplatform.broker.config; + +import lombok.extern.slf4j.Slf4j; +import org.datavaultplatform.broker.app.DataVaultBrokerApp; +import org.datavaultplatform.broker.controllers.admin.AdminController; +import org.datavaultplatform.broker.queue.Sender; 
+import org.datavaultplatform.broker.services.AdminDepositService;
+import org.datavaultplatform.broker.test.AddTestProperties;
+import org.datavaultplatform.common.docker.DockerImage;
+import org.datavaultplatform.common.util.UsesTestContainers;
+import org.junit.jupiter.api.Test;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.test.context.SpringBootTest;
+import org.springframework.boot.testcontainers.service.connection.ServiceConnection;
+import org.springframework.test.context.DynamicPropertyRegistry;
+import org.springframework.test.context.DynamicPropertySource;
+import org.springframework.test.context.TestPropertySource;
+import org.testcontainers.containers.BindMode;
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.containers.MariaDBContainer;
+import org.testcontainers.containers.RabbitMQContainer;
+import org.testcontainers.containers.startupcheck.MinimumDurationRunningStartupCheckStrategy;
+import org.testcontainers.junit.jupiter.Container;
+
+import java.time.Duration;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.datavaultplatform.broker.services.BaseEmailServiceTest.PORT_HTTP;
+import static org.datavaultplatform.broker.services.BaseEmailServiceTest.PORT_SMTP;
+import static org.datavaultplatform.common.ldap.BaseLDAPServiceIT.LDAP_ADMIN_PASSWORD;
+import static org.datavaultplatform.common.ldap.BaseLDAPServiceIT.LDAP_EXPOSED_PORT;
+
+/*
+Added this test to check the Spring bean wiring.
+When we use Testcontainers for Rabbit, MariaDB, LDAP and EMAIL - will all the beans wire up?
+There was a problem where AdminDepositService was not being created when the Broker was run as an application (not as a test).
+Note: AdminDepositService has '@ConditionalOnBean' - this is evaluated when the bean is first loaded; therefore, the order in which we
+load the Java config files in DataVaultBrokerApp does matter.
+ */
+@SpringBootTest(classes = DataVaultBrokerApp.class)
+@Slf4j
+@AddTestProperties
+@TestPropertySource(properties = {
+    "broker.controllers.enabled=true",
+    "broker.services.enabled=true",
+    "broker.scheduled.enabled=true",
+    "broker.rabbit.enabled=true",
+    "broker.database.enabled=true",
+    "auditdeposit.schedule=-",
+    "encryptioncheck.schedule=-",
+    "review.schedule=-",
+    "delete.schedule=-",
+    "retentioncheck.schedule=-"})
+@UsesTestContainers
+class InitialiseBeansConfigIT {
+
+    @Container
+    @ServiceConnection
+    // This container is once per class - not once per method. Methods can 'dirty' the database.
+ static final MariaDBContainer mariadb = new MariaDBContainer<>(DockerImage.MARIADB_IMAGE); + + @Container + @ServiceConnection + private static final RabbitMQContainer RABBIT = new RabbitMQContainer(DockerImage.RABBIT_IMAGE_NAME) + .withExposedPorts(5672,15672); + + @Container + private static final GenericContainer LDAP_CONTAINER = new GenericContainer<>(DockerImage.LDAP_IMAGE) + .withEnv("LDAP_ROOT", "o=myu.ed") + .withEnv("LDAP_ADMIN_PASSWORD", LDAP_ADMIN_PASSWORD) + //SCHEMA - allows 'eduniRefNo' attributes - via LDIF file + .withClasspathResourceMapping("ldap/eduniPersonSchema.ldif", "/schema/custom.ldif", + BindMode.READ_ONLY) + //USERS via LDIF file + .withClasspathResourceMapping("ldap/testUsers.ldif", "/custom/testUsers.ldif", + BindMode.READ_ONLY) + .withEnv("LDAP_CUSTOM_LDIF_DIR", "/custom") + .withExposedPorts(LDAP_EXPOSED_PORT) + .withStartupCheckStrategy( + //Gotta allow time for openldap to initialise + new MinimumDurationRunningStartupCheckStrategy(Duration.ofSeconds(5)) + ); + + + @Container + private static final GenericContainer MAILHOG_CONTAINER + = new GenericContainer<>(DockerImage.MAIL_IMAGE).withExposedPorts(PORT_SMTP, PORT_HTTP); + + @DynamicPropertySource + static void setupProperties(DynamicPropertyRegistry registry) { + setupMailProperties(registry); + } + + public static void setupMailProperties(DynamicPropertyRegistry registry) { + registry.add("tc.mailhog.http", () -> MAILHOG_CONTAINER.getMappedPort(PORT_HTTP)); + registry.add("mail.host", MAILHOG_CONTAINER::getHost); + registry.add("mail.port", () -> MAILHOG_CONTAINER.getMappedPort(PORT_SMTP)); + log.info("email http://localhost:{}", MAILHOG_CONTAINER.getMappedPort(PORT_HTTP)); + } + + @Autowired + AdminDepositService adminDepositService; + + @Autowired + AdminController adminController; + + @Autowired + Sender sender; + + @Test + void testBeans() { + assertThat(adminDepositService).isNotNull(); + assertThat(adminController).isNotNull(); + assertThat(sender).isNotNull(); + } + +} diff --git a/datavault-broker/src/test/java/org/datavaultplatform/broker/config/MockServicesConfig.java b/datavault-broker/src/test/java/org/datavaultplatform/broker/config/MockServicesConfig.java index 5c71f4780..d2fe5d143 100644 --- a/datavault-broker/src/test/java/org/datavaultplatform/broker/config/MockServicesConfig.java +++ b/datavault-broker/src/test/java/org/datavaultplatform/broker/config/MockServicesConfig.java @@ -1,6 +1,7 @@ package org.datavaultplatform.broker.config; +import org.datavaultplatform.broker.services.AdminDepositService; import org.datavaultplatform.broker.services.*; import org.springframework.boot.test.context.TestConfiguration; import org.springframework.boot.test.mock.mockito.MockBean; @@ -98,4 +99,6 @@ public class MockServicesConfig { @MockBean VaultsService mVaultsService; + @MockBean + AdminDepositService mAdminDepositService; } diff --git a/datavault-broker/src/test/java/org/datavaultplatform/broker/controllers/DepositControllerIT.java b/datavault-broker/src/test/java/org/datavaultplatform/broker/controllers/DepositControllerIT.java index 834319800..813c4ef67 100644 --- a/datavault-broker/src/test/java/org/datavaultplatform/broker/controllers/DepositControllerIT.java +++ b/datavault-broker/src/test/java/org/datavaultplatform/broker/controllers/DepositControllerIT.java @@ -3,6 +3,7 @@ import lombok.extern.slf4j.Slf4j; import org.datavaultplatform.broker.app.DataVaultBrokerApp; import org.datavaultplatform.broker.config.MockRabbitConfig; +import 
org.datavaultplatform.broker.services.AdminDepositService; import org.datavaultplatform.broker.services.*; import org.datavaultplatform.broker.test.AddTestProperties; import org.datavaultplatform.broker.test.BaseDatabaseTest; @@ -54,7 +55,10 @@ class DepositControllerIT extends BaseDatabaseTest { @MockBean EmailService emailService; - + + @MockBean + AdminDepositService mAdminDepositService; + @Autowired DepositsController controller; diff --git a/datavault-broker/src/test/java/org/datavaultplatform/broker/controllers/GenerateDepositMessageTest.java b/datavault-broker/src/test/java/org/datavaultplatform/broker/controllers/GenerateDepositMessageTest.java index 5123b5ecf..d4058fc06 100644 --- a/datavault-broker/src/test/java/org/datavaultplatform/broker/controllers/GenerateDepositMessageTest.java +++ b/datavault-broker/src/test/java/org/datavaultplatform/broker/controllers/GenerateDepositMessageTest.java @@ -31,6 +31,7 @@ import org.mockito.ArgumentCaptor; import org.mockito.Captor; import org.mockito.junit.jupiter.MockitoExtension; +import org.skyscreamer.jsonassert.JSONAssert; @ExtendWith(MockitoExtension.class) @@ -45,7 +46,7 @@ public class GenerateDepositMessageTest extends BaseGenerateMessageTest { private static final String FILE_STORE_SRC_LABEL = "FILE_STORE-SRC-LABEL"; final File srcDir = new File(baseDir, "src"); - + @Captor ArgumentCaptor argMessage; private DepositsController dc; @@ -148,7 +149,9 @@ void testAddDepositToGenerateDepositMessage() { assertEquals(destPath, actualDestPath); JsonNode expected = mapper.readTree(getExpectedJson(bagId, srcPath, destPath)); - assertEquals(expected, convert(generated)); + JsonNode actual = convert(generated); + JSONAssert.assertEquals(expected.toPrettyString(), actual.toPrettyString(), true); + assertEquals(expected, actual); log.info("Generated Message {}", expected.toPrettyString()); log.info("END SENT MESSAGE"); } @@ -192,11 +195,11 @@ private String getExpectedJson(String bagId, String srcRoot, String destRoot) { + " \"userFileStoreClasses\" : {" + " \"FILE-STORE-SRC-ID\" : \"org.datavaultplatform.common.storage.impl.LocalFileSystem\"" + " }," - + " \"chunkFilesDigest\" : null," + + " \"chunkFilesDigest\" : {}," + " \"tarIV\" : null," - + " \"chunksIVs\" : null," + + " \"chunksIVs\" : {}," + " \"encTarDigest\" : null," - + " \"encChunksDigest\" : null," + + " \"encChunksDigest\" : {}," + " \"lastEvent\" : null," + " \"chunksToAudit\" : null," + " \"archiveIds\" : null," diff --git a/datavault-broker/src/test/java/org/datavaultplatform/broker/controllers/RetrieveRestartIT.java b/datavault-broker/src/test/java/org/datavaultplatform/broker/controllers/RetrieveRestartIT.java index 388f2bafc..79be2235c 100644 --- a/datavault-broker/src/test/java/org/datavaultplatform/broker/controllers/RetrieveRestartIT.java +++ b/datavault-broker/src/test/java/org/datavaultplatform/broker/controllers/RetrieveRestartIT.java @@ -6,6 +6,7 @@ import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; import org.datavaultplatform.broker.app.DataVaultBrokerApp; +import org.datavaultplatform.broker.services.AdminDepositService; import org.datavaultplatform.broker.email.EmailBodyGenerator; import org.datavaultplatform.broker.queue.MessageIdProcessedListener; import org.datavaultplatform.broker.services.*; @@ -152,6 +153,9 @@ void init() { @MockBean MessageIdProcessedListener mMessageIdProcessedListener; + @MockBean + AdminDepositService mAdminDepositService; + List processedMessageIds; protected org.datavaultplatform.common.model.ArchiveStore archiveStore; diff 
--git a/datavault-broker/src/test/java/org/datavaultplatform/broker/queue/EventListenerIT.java b/datavault-broker/src/test/java/org/datavaultplatform/broker/queue/EventListenerIT.java index e07222dab..078195280 100644 --- a/datavault-broker/src/test/java/org/datavaultplatform/broker/queue/EventListenerIT.java +++ b/datavault-broker/src/test/java/org/datavaultplatform/broker/queue/EventListenerIT.java @@ -6,9 +6,9 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import java.time.LocalDate; -import java.util.Base64; -import java.util.Date; -import java.util.List; +import java.util.*; +import java.util.stream.Stream; + import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; import org.datavaultplatform.broker.app.DataVaultBrokerApp; @@ -25,6 +25,7 @@ import org.datavaultplatform.common.event.audit.ChunkAuditStarted; import org.datavaultplatform.common.event.delete.DeleteComplete; import org.datavaultplatform.common.event.delete.DeleteStart; +import org.datavaultplatform.common.event.delete.DeletedChunk; import org.datavaultplatform.common.event.deposit.Complete; import org.datavaultplatform.common.event.deposit.CompleteCopyUpload; import org.datavaultplatform.common.event.deposit.ComputedChunks; @@ -45,7 +46,6 @@ import org.junit.jupiter.params.provider.ValueSource; import org.springframework.amqp.rabbit.listener.RabbitListenerEndpointRegistry; import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.beans.factory.annotation.Value; import org.springframework.boot.test.context.SpringBootTest; import org.springframework.boot.test.mock.mockito.MockBean; import org.springframework.context.annotation.Import; @@ -65,8 +65,10 @@ @Import({EventListener.class, TaskTimerSupport.class}) @Slf4j @TestMethodOrder(MethodOrderer.MethodName.class) -public class EventListenerIT extends BaseDatabaseTest { +class EventListenerIT extends BaseDatabaseTest { + private static final String TEST_ARCHIVE_ID = "TEST-ARCHIVE_ID"; + @MockBean EmailService emailService; @@ -100,6 +102,15 @@ public class EventListenerIT extends BaseDatabaseTest { @Autowired AuditsService auditsService; + @Autowired + EventService eventService; + + @Autowired + ArchivesService archivesService; + + @Autowired + ArchiveStoreService archiveStoreService; + @MockBean RabbitListenerEndpointRegistry registry; private final String userId = "user123"; @@ -124,8 +135,6 @@ public class EventListenerIT extends BaseDatabaseTest { Audit audit; Group group; - @Autowired - private EventService eventService; @BeforeEach void setup(){ @@ -208,6 +217,16 @@ void setup(){ assertThat(retrieve.getID()).isNotNull(); } + Optional getLastJobEvent(String jobID) { + List allEvents = eventService.getEvents(); + Stream jobEvents = allEvents.stream() + .filter(ev -> jobID.equals(ev.getJob().getID())); + Optional lastJobEvent = jobEvents + .sorted(Comparator.comparing(Event::getSequence)) //sort by sequence number ascending + .reduce((first, second) -> second); //this is a trick to get the last event + return lastJobEvent; + } + @Test void test00EventListener() { assertNotNull(eventListener); @@ -274,6 +293,7 @@ void test02v1updateProgress() { + " \"agentType\": \"WORKER\"" + " }"; Event event = eventListener.onMessageInternal(message); + assertThat(event).isInstanceOf(UpdateProgress.class); } @SneakyThrows @@ -521,6 +541,8 @@ void test10Error() { + " \"timestamp\": \"2022-09-16T15:12:40.152Z\"," + " \"sequence\": 36," + " \"persistent\": true," + + " \"chunkNumber\": 123," + + " \"message\": \"the error message\"," + 
" \"depositId\": \"" + depositId + "\"," + " \"vaultId\" : \"" + vaultId + "\"," + " \"jobId\" : \"" + jobGenericId + "\"," @@ -530,6 +552,20 @@ void test10Error() { + " }"; Event event = eventListener.onMessageInternal(message); assertEquals(org.datavaultplatform.common.event.Error.class, event.getClass()); + + // double check that we have saved the Error event to the database by fetching it and checking it + + org.datavaultplatform.common.event.Error error = (org.datavaultplatform.common.event.Error) event; + assertEquals(123, error.getChunkNumber()); + assertEquals("the error message", error.getMessage()); + + Optional lastDepositJobEventOpt = getLastJobEvent(jobGenericId); + Event lastDepositEvent = lastDepositJobEventOpt.orElseThrow(); + assertThat(lastDepositEvent).isEqualTo(event); + assertThat(lastDepositEvent.getJob()).isEqualTo(event.getJob()); + assertThat(lastDepositEvent.getDeposit()).isEqualTo(event.getDeposit()); + assertThat(lastDepositEvent.getMessage()).isEqualTo(event.getMessage()); + assertThat(lastDepositEvent.getChunkNumber()).isEqualTo(event.getChunkNumber()); } @Nested @@ -736,8 +772,8 @@ void test29UserStoreSpaceAvailableChecked() { }) @SneakyThrows void testRetrieveError(String eventClass) { - Class clazz = Class.forName(eventClass); - assertThat(Event.class.isAssignableFrom(clazz)); + Class clazz = Class.forName(eventClass); + assertThat(Event.class).isAssignableFrom(clazz); String message = "{" + " \"message\": \"CUSTOM ERROR MESSAGE\"," + " \"eventClass\": \"" + eventClass + "\"," @@ -1065,4 +1101,89 @@ void test24ValidationComplete() { Event event = eventListener.onMessageInternal(message); assertEquals(ValidationComplete.class, event.getClass()); } + + @Test + @SneakyThrows + void test30DeletedChunk() { + ArchiveStore archiveStore = new ArchiveStore(); + archiveStoreService.addArchiveStore(archiveStore); + + archivesService.addArchive(this.deposit, archiveStore, TEST_ARCHIVE_ID); + + assertThat(archivesService.getArchiveByArchiveId(TEST_ARCHIVE_ID)).isNotNull(); + String message = "{" + + " \"message\" : \"Deleted Chunk [7/10] from (MultiLocationsArchiveStoreSuccessImpl/TEST-ARCHIVE-STORE-ID//private/tmp/delete/location-one)\"," + + " \"eventClass\" : \"org.datavaultplatform.common.event.delete.DeletedChunk\"," + + " \"timestamp\" : \"2026-02-03T15:05:08.385Z\"," + + " \"sequence\" : 123," + + " \"persistent\" : true," + + " \"depositId\" : \"" + depositId + "\"," + + " \"jobId\" : \"" + jobDepositId + "\"," + + " \"userId\" : \"" + userId + "\"," + + " \"agent\" : \"datavault-worker-1\"," + + " \"agentType\" : \"WORKER\"," + + " \"archiveId\" : \"" + TEST_ARCHIVE_ID + "\"," + + " \"location\" : \"/private/tmp/delete/location-one\"," + + " \"assigneeId\" : null," + + " \"chunkNumber\" : 123," + + " \"archiveStoreId\" : \"TEST-ARCHIVE-STORE-ID\"" + + "}"; + + Event event = eventListener.onMessageInternal(message); + assertEquals(DeletedChunk.class, event.getClass()); + DeletedChunk dc = (DeletedChunk) event; + assertThat(dc.getID()) + .withFailMessage("ID is null") + .isNotNull(); + // DEPOSIT + assertThat(dc.getDeposit()) + .withFailMessage("Deposit is null") + .isNotNull(); + assertThat(dc.getDepositId()) + .withFailMessage("DepositId is null") + .isNotNull(); + assertThat(dc.getJob()) + .withFailMessage("Job is null") + .isNotNull(); + assertThat(dc.getJobId()) + .withFailMessage("JobId is null") + .isNotNull(); + // USER + assertThat(dc.getUser()) + .withFailMessage("User is null") + .isNotNull(); + assertThat(dc.getUserId()) + .withFailMessage("UserId is 
null") + .isNotNull(); + // AGENT + assertThat(dc.getAgent()) + .withFailMessage("Agent is null") + .isNotNull(); + // Archive + assertThat(dc.getArchive()) + .withFailMessage("Archive is null") + .isNotNull(); + assertThat(dc.getArchiveId()) + .withFailMessage("ArchiveId is null") + .isNotNull(); + // ArchiveStoreId + assertThat(dc.getArchiveStoreId()) + .withFailMessage("ArchiveStoreId is null") + .isEqualTo("TEST-ARCHIVE-STORE-ID"); + // AgentType + assertThat(dc.getAgentType()) + .withFailMessage("AgentType is null") + .isNotNull(); + // VAULT + assertThat(dc.getVault()) + .withFailMessage("Vault is NOT NULL") + .isNull(); + assertThat(dc.getVaultId()) + .withFailMessage("VaultId is NOT NULL") + .isNull(); + assertThat(dc.getChunkNumber()) + .isEqualTo(123); + assertThat(dc.getLocation()) + .isEqualTo("/private/tmp/delete/location-one"); + } } diff --git a/datavault-broker/src/test/java/org/datavaultplatform/broker/queue/EventListenerTest.java b/datavault-broker/src/test/java/org/datavaultplatform/broker/queue/EventListenerTest.java index 1d66f93c2..b6d25cb03 100644 --- a/datavault-broker/src/test/java/org/datavaultplatform/broker/queue/EventListenerTest.java +++ b/datavault-broker/src/test/java/org/datavaultplatform/broker/queue/EventListenerTest.java @@ -2170,7 +2170,7 @@ public String getDigestAlgorithm() { } }; assertEquals(0, deposit.getNumOfChunks()); - assertNull(deposit.getDepositChunks()); + assertThat(deposit.getDepositChunks().isEmpty()); doNothing().when(depositsService).updateDeposit(deposit); sut.updateDepositWithChunks(deposit, event); diff --git a/datavault-broker/src/test/java/org/datavaultplatform/broker/scheduled/ScheduledTasksWithDbIT.java b/datavault-broker/src/test/java/org/datavaultplatform/broker/scheduled/ScheduledTasksWithDbIT.java index 269789ea4..1ce3ea92c 100644 --- a/datavault-broker/src/test/java/org/datavaultplatform/broker/scheduled/ScheduledTasksWithDbIT.java +++ b/datavault-broker/src/test/java/org/datavaultplatform/broker/scheduled/ScheduledTasksWithDbIT.java @@ -8,6 +8,7 @@ import lombok.extern.slf4j.Slf4j; import org.datavaultplatform.broker.app.DataVaultBrokerApp; import org.datavaultplatform.broker.config.MockRabbitConfig; +import org.datavaultplatform.broker.services.AdminDepositService; import org.datavaultplatform.broker.services.EmailService; import org.datavaultplatform.broker.test.AddTestProperties; import org.datavaultplatform.broker.test.BaseReuseDatabaseTest; @@ -68,6 +69,9 @@ public class ScheduledTasksWithDbIT extends BaseReuseDatabaseTest { @MockBean EmailService emailService; + @MockBean + AdminDepositService adminDepositService; + @Autowired AuditDepositsChunks scheduled1auditDepositsChunks; diff --git a/datavault-broker/src/test/java/org/datavaultplatform/broker/services/VaultsServiceIT.java b/datavault-broker/src/test/java/org/datavaultplatform/broker/services/VaultsServiceIT.java index 387737f5f..04ff4496c 100644 --- a/datavault-broker/src/test/java/org/datavaultplatform/broker/services/VaultsServiceIT.java +++ b/datavault-broker/src/test/java/org/datavaultplatform/broker/services/VaultsServiceIT.java @@ -14,6 +14,7 @@ import org.junit.jupiter.api.Test; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.mock.mockito.MockBean; import org.springframework.context.annotation.Import; import org.springframework.test.context.TestPropertySource; @@ -32,6 +33,9 @@ public class VaultsServiceIT extends BaseReuseDatabaseTest { 
@Autowired private RolesAndPermissionsService rolesAndPermissionsService; + @MockBean + AdminDepositService adminDepositService; + @Test public void checkVaultCount() { RoleAssignment isAdminRoleAssignment = new RoleAssignment(); diff --git a/datavault-broker/src/test/java/org/datavaultplatform/common/model/dao/EventDAOIT.java b/datavault-broker/src/test/java/org/datavaultplatform/common/model/dao/EventDAOIT.java index 6a0465bcc..72f9bba95 100644 --- a/datavault-broker/src/test/java/org/datavaultplatform/common/model/dao/EventDAOIT.java +++ b/datavault-broker/src/test/java/org/datavaultplatform/common/model/dao/EventDAOIT.java @@ -15,16 +15,20 @@ import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; import org.datavaultplatform.broker.app.DataVaultBrokerApp; +import org.datavaultplatform.broker.services.EventService; import org.datavaultplatform.broker.test.AddTestProperties; import org.datavaultplatform.broker.test.BaseDatabaseTest; import org.datavaultplatform.broker.test.TestUtils; import org.datavaultplatform.common.event.Event; +import org.datavaultplatform.common.event.delete.DeletedChunk; import org.datavaultplatform.common.event.deposit.*; import org.datavaultplatform.common.event.Error; import org.datavaultplatform.common.event.retrieve.*; +import org.datavaultplatform.common.model.Agent; import org.datavaultplatform.common.model.Deposit; import org.datavaultplatform.common.model.Job; import org.datavaultplatform.common.model.Vault; +import org.datavaultplatform.common.storage.impl.LocalFileSystem; import org.datavaultplatform.common.util.RetrievedChunks; import org.datavaultplatform.common.util.StoredChunks; import org.junit.jupiter.api.AfterEach; @@ -65,7 +69,10 @@ public class EventDAOIT extends BaseDatabaseTest { @PersistenceContext EntityManager em; - + + @Autowired + private EventService eventService; + @Nested class BlobTests { @@ -778,4 +785,63 @@ RetrieveComplete get06RetrieveComplete(String jobId, String depositId, String re return new RetrieveComplete(jobId, depositId, retrieveId); } } + + /** + * Tests that the Deposit and Job associated with a DeletedChunk event are correctly handled + * by Hibernate's session cache/identity map. + * Verifies that when retrieving a DeletedChunk event, the associated Deposit and Job entities + * are the same instances as those expected. + * We have to use EntityManager::flush to force saving to DB as generally DB writes happen at end of transaction. 
+ */ + @Transactional + @Test + void testDepositAndJobLazyLoadingOfDeletedChunkEvent() { + + Deposit deposit = new Deposit(); + deposit.setHasPersonalData(false); + deposit.setName("test-deposit-name"); + depositDAO.save(deposit); + + Job job = new Job(); + job.setDeposit(deposit); + jobDAO.save(job); + + em.flush(); + em.clear(); + assertThat(em.contains(deposit)).isFalse(); + assertThat(em.contains(job)).isFalse(); + + DeletedChunk dc = new DeletedChunk(job.getID(), deposit.getID(), 123, 999, LocalFileSystem.class, "TST-ARCHIVE-STORE-ID", "TST-LOCATION"); + dc.setAgent("TST-AGENT"); + dc.setAgentType(Agent.AgentType.WORKER); + dc.setMessage("test-message"); + dc.setJob(job); + dc.setDeposit(deposit); + + eventService.addEvent(dc); + deposit.getEvents().add(dc); + + em.flush(); + em.clear(); + + // get back the job and deposit from db - check that have correct ids + Deposit depoFromDb = depositDAO.getReferenceById(deposit.getID()); + Job jobFromDeposit = depoFromDb.getJobs().get(0); + assertThat(depoFromDb.getID()).isEqualTo(deposit.getID()); + assertThat(jobFromDeposit.getID()).isEqualTo(job.getID()); + assertThat(em.contains(depoFromDb)).isTrue(); + assertThat(em.contains(jobFromDeposit)).isTrue(); + + // get back event from db - check it has correct id and Deposit and Job are existing hibernate objects + Event eventFromDb = eventService.getEvent(dc.getID()); + assertThat(eventFromDb.getID()).isEqualTo(dc.getID()); + + assertThat(em.contains(eventFromDb)).isTrue(); + + assertThat(eventFromDb.getDeposit()).isSameAs(depoFromDb); + assertThat(eventFromDb.getJob()).isSameAs(jobFromDeposit); + + Event depositEvent1 = depoFromDb.getEvents().get(0); + assertThat(depositEvent1).isSameAs(eventFromDb); + } } diff --git a/datavault-broker/src/test/resources/samples/sampleDeleteChunkError.json b/datavault-broker/src/test/resources/samples/sampleDeleteChunkError.json new file mode 100644 index 000000000..10650ad9d --- /dev/null +++ b/datavault-broker/src/test/resources/samples/sampleDeleteChunkError.json @@ -0,0 +1,27 @@ +{ + "id": null, + "message": "Deposit delete failed: ArchiveStore[ArchiveStoreFailureImpl/TEST-ARCHIVE-STORE-ID]Location[no-location]ChunkNum[1]Cause[java.lang.RuntimeException/oops@1]", + "retrieveId": null, + "eventClass": "org.datavaultplatform.common.event.Error", + "nextState": null, + "timestamp": "2026-01-30T10:19:30.443Z", + "sequence": 0, + "persistent": true, + "depositId": "TEST-DEPOSIT-ID", + "vaultId": null, + "jobId": "TEST-JOB-ID", + "userId": "TEST-USER-ID", + "agent": null, + "remoteAddress": null, + "userAgent": null, + "agentType": null, + "auditId": null, + "archiveId": "TEST-ARCHIVE-ID", + "location": "no-location", + "chunkId": null, + "assigneeId": null, + "schoolId": null, + "roleId": null, + "chunkNumber": 1, + "archiveStoreId": "TEST-ARCHIVE-STORE-ID" +} \ No newline at end of file diff --git a/datavault-common/src/main/java/org/datavaultplatform/common/PropNames.java b/datavault-common/src/main/java/org/datavaultplatform/common/PropNames.java index fda19d984..9390b7699 100644 --- a/datavault-common/src/main/java/org/datavaultplatform/common/PropNames.java +++ b/datavault-common/src/main/java/org/datavaultplatform/common/PropNames.java @@ -57,4 +57,5 @@ public interface PropNames { String USER_FS_RETRY_DELAY_MS_1 = "userFsRetryDelayMs1"; String USER_FS_RETRY_DELAY_MS_2 = "userFsRetryDelayMs2"; String NON_RESTART_JOB_ID = "nonRestartJobId"; + String WORKERS_SEND_DELETED_CHUNK_EVENTS = "workersSendDeletedChunkEvents"; } diff --git 
a/datavault-common/src/main/java/org/datavaultplatform/common/event/Event.java b/datavault-common/src/main/java/org/datavaultplatform/common/event/Event.java index ef0388bf3..71f01d3c3 100644 --- a/datavault-common/src/main/java/org/datavaultplatform/common/event/Event.java +++ b/datavault-common/src/main/java/org/datavaultplatform/common/event/Event.java @@ -33,6 +33,7 @@ @JsonSubTypes.Type(value = DeleteStart.class, name = "org.datavaultplatform.common.event.delete.DeleteStart"), @JsonSubTypes.Type(value = DeleteComplete.class, name = "org.datavaultplatform.common.event.delete.DeleteComplete"), + @JsonSubTypes.Type(value = DeletedChunk.class, name = "org.datavaultplatform.common.event.delete.DeletedChunk"), @JsonSubTypes.Type(value = ValidationComplete.class, name = "org.datavaultplatform.common.event.deposit.ValidationComplete"), @JsonSubTypes.Type(value = ComputedSize.class, name = "org.datavaultplatform.common.event.deposit.ComputedSize"), @@ -81,7 +82,7 @@ @JsonSubTypes.Type(value = Event.class, name = "org.datavaultplatform.common.event.Event"), @JsonSubTypes.Type(value = Error.class, name = "org.datavaultplatform.common.event.Error"), @JsonSubTypes.Type(value = InitStates.class, name = "org.datavaultplatform.common.event.InitStates"), - @JsonSubTypes.Type(value = UpdateProgress.class, name = "org.datavaultplatform.common.event.UpdateProgress") + @JsonSubTypes.Type(value = UpdateProgress.class, name = "org.datavaultplatform.common.event.UpdateProgress"), }) @JsonIgnoreProperties(ignoreUnknown = true) @Entity diff --git a/datavault-common/src/main/java/org/datavaultplatform/common/event/delete/DeletedChunk.java b/datavault-common/src/main/java/org/datavaultplatform/common/event/delete/DeletedChunk.java new file mode 100644 index 000000000..b80d23850 --- /dev/null +++ b/datavault-common/src/main/java/org/datavaultplatform/common/event/delete/DeletedChunk.java @@ -0,0 +1,33 @@ +package org.datavaultplatform.common.event.delete; + +import jakarta.persistence.Entity; + +import org.datavaultplatform.common.event.Event; +import org.datavaultplatform.common.storage.ArchiveStore; + +@Entity +public class DeletedChunk extends Event { + + public DeletedChunk() { + } + + public DeletedChunk(String jobId, String depositId, + int chunkNumber, int numberOfChunks, + Class archiveType, String archiveStoreId, String location) { + super("Deleted Chunk [%d/%d] from (%s/%s/%s)" + .formatted(chunkNumber, numberOfChunks, archiveType.getSimpleName(), archiveStoreId, location)); + this.setEventClass(DeletedChunk.class.getCanonicalName()); + this.setDepositId(depositId); + this.setJobId(jobId); + this.setChunkNumber(chunkNumber); + this.setArchiveStoreId(archiveStoreId); + + // location is not persisted to database - that's why it's also in the message + this.setLocation(location); + } + + @Override + public String toString() { + return getMessage(); + } +} diff --git a/datavault-common/src/main/java/org/datavaultplatform/common/model/Deposit.java b/datavault-common/src/main/java/org/datavaultplatform/common/model/Deposit.java index 1ca0612f4..23f25bd73 100644 --- a/datavault-common/src/main/java/org/datavaultplatform/common/model/Deposit.java +++ b/datavault-common/src/main/java/org/datavaultplatform/common/model/Deposit.java @@ -64,33 +64,33 @@ public class Deposit implements Identified { @JsonIgnore @OneToMany(targetEntity=Event.class, mappedBy="deposit", fetch=FetchType.LAZY) @OrderBy("timestamp, sequence") - private List events; + private List events = new ArrayList<>(); // A Deposit can have a number 
of deposit paths @OneToMany(targetEntity=DepositPath.class, mappedBy="deposit", fetch=FetchType.LAZY, cascade = CascadeType.ALL) - private List depositPaths; + private List depositPaths = new ArrayList<>(); // A Deposit can have a number of deposit chunks @OneToMany(targetEntity=DepositChunk.class, mappedBy="deposit", fetch=FetchType.LAZY, cascade = CascadeType.ALL) - private List depositChunks; + private List depositChunks = new ArrayList<>(); // A Deposit can have a number of active jobs @JsonIgnore @OneToMany(targetEntity=Job.class, mappedBy="deposit", fetch=FetchType.LAZY) @OrderBy("timestamp") - private List jobs; + private List jobs = new ArrayList<>(); // A Deposit can have a number of retrieves @JsonIgnore @OneToMany(targetEntity=Retrieve.class, mappedBy="deposit", fetch=FetchType.LAZY) @OrderBy("timestamp") - private List retrieves; + private List retrieves = new ArrayList<>(); // A Deposit can have a number of reviews @JsonIgnore @OneToMany(targetEntity=DepositReview.class, mappedBy="deposit", fetch=FetchType.LAZY) @OrderBy("creationTime") - private List depositReviews; + private List depositReviews = new ArrayList<>(); @ApiObjectField(description = "Status of the Deposit", allowedvalues={"NOT_STARTED", "IN_PROGRESS", "COMPLETE"}) diff --git a/datavault-common/src/main/java/org/datavaultplatform/common/model/dao/ArchiveDAO.java b/datavault-common/src/main/java/org/datavaultplatform/common/model/dao/ArchiveDAO.java index be0061189..235c7aaaf 100644 --- a/datavault-common/src/main/java/org/datavaultplatform/common/model/dao/ArchiveDAO.java +++ b/datavault-common/src/main/java/org/datavaultplatform/common/model/dao/ArchiveDAO.java @@ -30,4 +30,7 @@ public interface ArchiveDAO extends BaseDAO { LIMIT 1 """) Optional findLatestByDepositIdAndArchiveStoreId(String depositId, String archiveStoreId); + + @EntityGraph(Archive.EG_ARCHIVE) + Optional findByArchiveId(String archiveId); } diff --git a/datavault-common/src/main/java/org/datavaultplatform/common/storage/impl/FileSystemUtils.java b/datavault-common/src/main/java/org/datavaultplatform/common/storage/impl/FileSystemUtils.java index 871f3fa98..47948f87b 100644 --- a/datavault-common/src/main/java/org/datavaultplatform/common/storage/impl/FileSystemUtils.java +++ b/datavault-common/src/main/java/org/datavaultplatform/common/storage/impl/FileSystemUtils.java @@ -17,6 +17,9 @@ public class FileSystemUtils { private static final String EMPTY_STRING = ""; + private FileSystemUtils() { + } + public static Path getAbsolutePath(String filePath, String location) { // Join the requested path to the root of the filesystem. @@ -73,7 +76,16 @@ public static long getUsableSpace(File file) { public static void delete(String path, String location) throws Exception { Path absolutePath = getAbsolutePath(path, location); - Files.deleteIfExists(absolutePath); + log.info("Starting to Delete [{}] ", absolutePath); + boolean deleted = false; + try { + deleted = Files.deleteIfExists(absolutePath); + } catch(Exception ex) { + log.info("Attempt to Delete [{}] failed ", absolutePath, ex); + throw ex; + } finally { + log.info("Attempt to Delete [{}] success ? 
{}", absolutePath, deleted); + } } public static String store(String path, File working, Progress progress, String location) throws Exception{ diff --git a/datavault-common/src/main/java/org/datavaultplatform/common/task/TaskExecutor.java b/datavault-common/src/main/java/org/datavaultplatform/common/task/TaskExecutor.java index 0876ccdc7..c8aa471f8 100644 --- a/datavault-common/src/main/java/org/datavaultplatform/common/task/TaskExecutor.java +++ b/datavault-common/src/main/java/org/datavaultplatform/common/task/TaskExecutor.java @@ -1,6 +1,8 @@ package org.datavaultplatform.common.task; import org.datavaultplatform.common.util.Utils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.springframework.util.Assert; import java.util.ArrayList; @@ -8,12 +10,13 @@ import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; -import java.util.stream.Collectors; public class TaskExecutor { - private final int numThreads; - private final String errorLabel; + private static final Logger LOG = LoggerFactory.getLogger(TaskExecutor.class); + private static final long TIMEOUT_MINUTES = 5; + private final int numThreads; + private final String errorLabel; private final List> tasks = new ArrayList<>(); private final List> origTasks = new ArrayList<>(); @@ -52,31 +55,66 @@ private Callable wrap(Callable task) { public synchronized void execute() throws Exception { execute(result -> {}); } - - public synchronized void execute(Consumer consumer) throws Exception { - if (executed.getAndSet(true)) { - throw new IllegalStateException("Already executed"); - } - ExecutorService service = Executors.newFixedThreadPool(numThreads); + public synchronized void execute(Consumer consumer) throws Exception { + if (executed.getAndSet(true)) { + throw new IllegalStateException("Already executed"); + } - List> futures = tasks.stream() - .map(service::submit) - .toList(); + ExecutorService service = Executors.newFixedThreadPool(numThreads); - service.shutdown(); + List> futures = tasks.stream() + .map(service::submit) + .toList(); + try { + // shutdown - prevents the submission of further Callable Tasks + service.shutdown(); - // service.awaitTermination(1, TimeUnit.MINUTES); + for (Future future : futures) { + getResultFromFuture(future, consumer); + } - for (Future future : futures) { - try { - T result = future.get(); - consumer.accept(result); - } catch (ExecutionException ee) { - Utils.handleExecutionException(ee, errorLabel); - } + } catch (InterruptedException e) { + LOG.error("Main thread interrupted!"); + // Restore the status so the calling code knows we were interrupted + Thread.currentThread().interrupt(); + } finally { + handleShutdown(service); + } } - service.shutdownNow(); - } + private void getResultFromFuture(Future future, Consumer consumer) throws Exception { + try { + T result = future.get(); + consumer.accept(result); + } catch (ExecutionException ee) { + Utils.handleExecutionException(ee, errorLabel); + } + } + + private void handleShutdown(ExecutorService executor) { + // If it's already fully closed, we're done. + if (executor == null || executor.isTerminated()) return; + + try { + // Only call shutdown if it hasn't been called yet + if (!executor.isShutdown()) { + executor.shutdown(); + } + + boolean success = executor.awaitTermination(TIMEOUT_MINUTES, TimeUnit.MINUTES); + LOG.info("Tasks Finished within [{}] minute timeout ? 
{}", TIMEOUT_MINUTES, success); + + } catch (InterruptedException ie) { + LOG.warn("Shutdown interrupted, forcing immediate termination."); + Thread.currentThread().interrupt(); + } finally { + // If the 5 minute passed OR we were interrupted, + // and tasks are STILL running, we kill them now. + if (!executor.isTerminated()) { + List notStarted = executor.shutdownNow(); + LOG.warn("ExecutorService[{}]Terminated. NotStartedCount[{}]", errorLabel, notStarted.size()); + } + } + } } diff --git a/datavault-common/src/test/java/org/datavaultplatform/common/ldap/BaseLDAPServiceIT.java b/datavault-common/src/test/java/org/datavaultplatform/common/ldap/BaseLDAPServiceIT.java index 045249bf2..dc5d2c962 100644 --- a/datavault-common/src/test/java/org/datavaultplatform/common/ldap/BaseLDAPServiceIT.java +++ b/datavault-common/src/test/java/org/datavaultplatform/common/ldap/BaseLDAPServiceIT.java @@ -42,7 +42,7 @@ public abstract class BaseLDAPServiceIT { */ public static final String LDAP_ADMIN_PASSWORD = "test-password"; - private static final int LDAP_EXPOSED_PORT = 1389; + public static final int LDAP_EXPOSED_PORT = 1389; @Container private static final GenericContainer LDAP_CONTAINER = new GenericContainer<>(DockerImage.LDAP_IMAGE) diff --git a/datavault-common/src/test/java/org/datavaultplatform/common/task/TaskExecutorTest.java b/datavault-common/src/test/java/org/datavaultplatform/common/task/TaskExecutorTest.java index 6d6e5403a..be3fd5112 100644 --- a/datavault-common/src/test/java/org/datavaultplatform/common/task/TaskExecutorTest.java +++ b/datavault-common/src/test/java/org/datavaultplatform/common/task/TaskExecutorTest.java @@ -7,11 +7,13 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.Callable; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.*; -public class TaskExecutorTest { +class TaskExecutorTest { @Test void testSingleExecution() throws Exception { @@ -56,6 +58,28 @@ void testTaskExecutionException() { assertEquals("bob123", ex.getMessage()); } + @Test + void testTasksSubmittedBeforeErrorTaskCanRunToCompletion() { + List finishedOkay = new CopyOnWriteArrayList<>(); + TaskExecutor executor = new TaskExecutor<>(1, "Error"); + for (int taskNum = 1; taskNum <= 3; taskNum++) { + final String taskNumString = String.valueOf(taskNum); + executor.add(() -> { + String msg = "finishedOkay-" + taskNumString; + finishedOkay.add(msg); + return msg; + }); + } + executor.add(() -> { + throw new IOException("oops!"); + }); + IOException ex = assertThrows(IOException.class, () -> { + executor.execute(System.out::println); + }); + assertThat(ex).hasMessage("oops!"); + assertThat(finishedOkay).isEqualTo(List.of("finishedOkay-1","finishedOkay-2","finishedOkay-3")); + } + @Test void testNullTasksRejected() { TaskExecutor executor = new TaskExecutor<>(1, "Error"); @@ -92,9 +116,13 @@ void testParallelExecution() throws Exception { } private Callable getDelayedTask(int delaySecs, T result) { + return getDelayedTask(delaySecs, () -> result); + } + + private Callable getDelayedTask(int delaySecs, Callable result) { return () -> { TimeUnit.SECONDS.sleep(delaySecs); - return result; + return result.call(); }; } diff --git a/datavault-worker/src/main/java/org/datavaultplatform/worker/tasks/Delete.java b/datavault-worker/src/main/java/org/datavaultplatform/worker/tasks/Delete.java index f90b74271..f9ced5797 100644 --- 
--- a/datavault-worker/src/main/java/org/datavaultplatform/worker/tasks/Delete.java +++ b/datavault-worker/src/main/java/org/datavaultplatform/worker/tasks/Delete.java @@ -6,41 +6,47 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.Callable; +import lombok.AllArgsConstructor; import org.datavaultplatform.common.PropNames; +import org.datavaultplatform.common.event.*; import org.datavaultplatform.common.event.Error; -import org.datavaultplatform.common.event.EventSender; -import org.datavaultplatform.common.event.InitStates; -import org.datavaultplatform.common.event.UpdateProgress; import org.datavaultplatform.common.event.delete.DeleteComplete; import org.datavaultplatform.common.event.delete.DeleteStart; +import org.datavaultplatform.common.event.delete.DeletedChunk; import org.datavaultplatform.common.io.Progress; import org.datavaultplatform.common.storage.ArchiveStore; import org.datavaultplatform.common.storage.Device; import org.datavaultplatform.common.task.Context; import org.datavaultplatform.common.task.Task; +import org.datavaultplatform.common.task.TaskExecutor; import org.datavaultplatform.common.util.StorageClassNameResolver; import org.datavaultplatform.common.util.StorageClassUtils; import org.datavaultplatform.worker.operations.FileSplitter; import org.datavaultplatform.worker.operations.ProgressTracker; +import org.datavaultplatform.worker.tasks.delete.DeleteState; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.springframework.util.Assert; + +import static java.util.Comparator.*; public class Delete extends Task { - private static final Logger logger = LoggerFactory.getLogger(Delete.class); + public static final String NO_LOCATION = "no-location"; + private static final Logger logger = LoggerFactory.getLogger(Delete.class); + // Maps the model ArchiveStore ID to the storage equivalent + private final HashMap<String, ArchiveStore> archiveStores = new HashMap<>(); private String archiveId = null; - private String userID = null; private int numOfChunks = 0; - private String depositId = null; private long archiveSize = 0; private EventSender eventSender = null; - // Maps the model ArchiveStore ID to the storage equivalent - private final HashMap<String, ArchiveStore> archiveStores = new HashMap<>(); + private boolean sendDeletedChunkEvents = false; - @Override + @Override public void performAction(Context context) { this.eventSender = context.getEventSender(); @@ -51,6 +57,7 @@ public void performAction(Context context) { this.userID = properties.get(PropNames.USER_ID); this.numOfChunks = Integer.parseInt(properties.get(PropNames.NUM_OF_CHUNKS)); this.archiveSize = Long.parseLong(properties.get(PropNames.ARCHIVE_SIZE)); + this.sendDeletedChunkEvents = Boolean.parseBoolean(properties.get(PropNames.WORKERS_SEND_DELETED_CHUNK_EVENTS)); if (this.isRedeliver()) { eventSender.send(new Error(this.jobID, this.depositId, "Delete stopped: the message had been redelivered, please investigate") @@ -60,116 +67,225 @@ public void performAction(Context context) { this.initStates(); + final TaskExecutor<DeletedChunk> taskExecutor; + logger.info("bagID: {}", bagID); - //userStores = this.setupUserFileStores(); this.setupArchiveFileStores(context.getStorageClassNameResolver()); try { + taskExecutor = getTaskExecutor(context); + String tarFileName = bagID + ".tar"; Path tarPath = context.getTempDir().resolve(tarFileName); File tarFile = tarPath.toFile(); - eventSender.send(new DeleteStart(this.jobID, this.depositId).withNextState(0) + eventSender.send(new DeleteStart(this.jobID, this.depositId) + .withNextState(DeleteState.DeleteState00DeleteStart.getStateNumber()) .withUserId(this.userID)); eventSender.send(new UpdateProgress(this.jobID, this.depositId, 0, this.archiveSize, "Deposit delete started ...") .withUserId(this.userID)); - for (String archiveStoreId : archiveStores.keySet() ) { - ArchiveStore archiveStore = archiveStores.get(archiveStoreId); - this.archiveId = properties.get(archiveStoreId); + for (Map.Entry<String, ArchiveStore> entry : archiveStores.entrySet()) { + ArchiveContext archiveContext = new ArchiveContext(entry); + this.archiveId = properties.get(archiveContext.archiveStoreId); logger.info("archiveId: {}", this.archiveId); - Device archiveFs = ((Device)archiveStore); - if(archiveFs.hasMultipleCopies()) { - deleteMultipleCopiesFromArchiveStorage(context, archiveFs, tarFileName, tarFile); + + if (archiveContext.hasMultipleCopies()) { + deleteMultipleCopiesFromArchiveStorage(taskExecutor, context, archiveContext, tarFileName, tarFile); } else { - deleteFromArchiveStorage(context, archiveFs, tarFileName, tarFile); + deleteFromArchiveStorage(taskExecutor, context, archiveContext, tarFileName, tarFile); } } + List<DeletedChunk> deletedChunks = new ArrayList<>(); + taskExecutor.execute(deletedChunks::add); // this will throw an exception if there was an error with a single delete task + + sortAndLogDeletedChunks(deletedChunks); + logger.info("Sending delete complete event"); - eventSender.send(new DeleteComplete(this.jobID, this.depositId).withNextState(1) + eventSender.send(new DeleteComplete(this.jobID, this.depositId) + .withNextState(DeleteState.DeleteState01DeleteComplete.getStateNumber()) .withUserId(this.userID)); logger.info("Sent delete complete event"); - - } catch (Exception e) { - String msg = "Deposit delete failed: " + e.getMessage(); - logger.error(msg, e); - eventSender.send(new Error(jobID, depositId, msg) - .withUserId(userID)); - throw new RuntimeException(e); + + } catch (Exception ex) { + String msg = "Deposit delete failed: " + ex.getMessage(); + logger.error(msg, ex); + Event errEvent = new Error(jobID, depositId, msg) + .withUserId(userID); + errEvent.setArchiveId(archiveId); + if (ex instanceof DeleteFileException dfEx) { + errEvent.setArchiveStoreId(dfEx.getArchiveStoreId()); + errEvent.setLocation(dfEx.getLocation()); + errEvent.setChunkNumber(dfEx.getChunkNumber()); + } + eventSender.send(errEvent); + throw new RuntimeException(ex); } } - + + private void sortAndLogDeletedChunks(List<DeletedChunk> deletedChunks) { + deletedChunks.sort( + comparing(DeletedChunk::getArchiveStoreId, nullsLast(naturalOrder())) + .thenComparing(DeletedChunk::getLocation, nullsLast(naturalOrder())) + .thenComparing(DeletedChunk::getChunkNumber, nullsLast(naturalOrder())) + ); + + // if we get here - we can assume all deletes worked + var size = deletedChunks.size(); + logger.info("Deleted [{}] Chunks", size); + for (int i = 0; i < size; i++) { + logger.info("Deleted Chunk[{}/{}][{}]", i + 1, size, deletedChunks.get(i)); + } + } + private void initStates() { - ArrayList<String> states = new ArrayList<>(); - states.add("Deleting from archive"); // 0 - states.add("Delete complete"); // 1 - eventSender.send(new InitStates(this.jobID, this.depositId, states) - .withUserId(userID)); - } - + eventSender.send(new InitStates(this.jobID, this.depositId, DeleteState.getDeleteStates()) + .withUserId(userID)); + } private void setupArchiveFileStores(StorageClassNameResolver resolver) { - // Connect to the archive storage(s). Look out! There are two classes called archiveStore.
- for (org.datavaultplatform.common.model.ArchiveStore archiveFileStore : archiveFileStores ) { - try { - ArchiveStore archiveStore = StorageClassUtils.createStorage( - archiveFileStore.getStorageClass(), - archiveFileStore.getProperties(), - ArchiveStore.class, resolver); - archiveStores.put(archiveFileStore.getID(), archiveStore); - } catch (Exception e) { - String msg = "Deposit failed: could not access archive filesystem : " + archiveFileStore.getStorageClass(); - logger.error(msg, e); - eventSender.send(new Error(this.jobID, this.depositId, msg).withUserId(this.userID)); - throw new RuntimeException(e); - } - } + // Connect to the archive storage(s). Look out! There are two classes called archiveStore. + for (org.datavaultplatform.common.model.ArchiveStore archiveFileStore : archiveFileStores) { + try { + ArchiveStore archiveStore = StorageClassUtils.createStorage( + archiveFileStore.getStorageClass(), + archiveFileStore.getProperties(), + ArchiveStore.class, resolver); + archiveStores.put(archiveFileStore.getID(), archiveStore); + } catch (Exception e) { + String msg = "Deposit failed: could not access archive filesystem : " + archiveFileStore.getStorageClass(); + logger.error(msg, e); + eventSender.send(new Error(this.jobID, this.depositId, msg).withUserId(this.userID)); + throw new RuntimeException(e); + } + } } - - private void deleteMultipleCopiesFromArchiveStorage(Context context, Device archiveFs, String tarFileName, File tarFile) throws Exception { - Progress progress = new Progress(); + private void deleteMultipleCopiesFromArchiveStorage(TaskExecutor<DeletedChunk> taskExecutor, Context context, ArchiveContext archiveContext, String tarFileName, File tarFile) throws Exception { + + final Progress progress = new Progress(); ProgressTracker tracker = new ProgressTracker(progress, this.jobID, this.depositId, this.archiveSize, this.eventSender); tracker.track(() -> { + + DeleteContext deleteContext = new DeleteContext(archiveContext, progress, taskExecutor); + logger.info("deleteMultipleCopiesFromArchiveStorage for deposit : {}", this.depositId); - List<String> locations = archiveFs.getLocations(); - for (String location : locations) { + for (String location : archiveContext.getLocations()) { logger.info("Delete from location : {}", location); if (context.isChunkingEnabled()) { for (int chunkNum = 1; chunkNum <= this.numOfChunks; chunkNum++) { Path chunkPath = context.getTempDir().resolve(tarFileName + FileSplitter.CHUNK_SEPARATOR + chunkNum); File chunkFile = chunkPath.toFile(); String chunkArchiveId = this.archiveId + FileSplitter.CHUNK_SEPARATOR + chunkNum; - archiveFs.delete(chunkArchiveId, chunkFile, progress, location); - logger.info("---------deleteMultipleCopiesFromArchiveStorage ------chunkArchiveId Deleted---- {} ", chunkArchiveId); + deleteContext.deleteChunkWithLocation(chunkNum, location, chunkArchiveId, chunkFile); } } else { - archiveFs.delete(this.archiveId, tarFile, progress, location); - logger.info("---------deleteMultipleCopiesFromArchiveStorage ------archiveId Deleted---- {} ", this.archiveId); + deleteContext.deleteChunkWithLocation(0, location, this.archiveId, tarFile); } } }); } - - private void deleteFromArchiveStorage(Context context, Device archiveFs, String tarFileName, File tarFile) throws Exception { + + private void deleteFromArchiveStorage(TaskExecutor<DeletedChunk> taskExecutor, Context context, + ArchiveContext archiveContext, String tarFileName, File tarFile) throws Exception { Progress progress = new Progress(); ProgressTracker tracker = new ProgressTracker(progress, this.jobID, this.depositId, this.archiveSize, this.eventSender); tracker.track(() -> { + DeleteContext deleteContext = new DeleteContext(archiveContext, progress, taskExecutor); + logger.info("deleteFromArchiveStorage for deposit : {}", this.depositId); if (context.isChunkingEnabled()) { for (int chunkNum = 1; chunkNum <= this.numOfChunks; chunkNum++) { Path chunkPath = context.getTempDir().resolve(tarFileName + FileSplitter.CHUNK_SEPARATOR + chunkNum); File chunkFile = chunkPath.toFile(); String chunkArchiveId = this.archiveId + FileSplitter.CHUNK_SEPARATOR + chunkNum; - archiveFs.delete(chunkArchiveId, chunkFile, progress); - logger.info("---------deleteFromArchiveStorage ------chunkArchiveId Deleted---- {} ", chunkArchiveId); + deleteContext.deleteChunkNoLocation(chunkNum, chunkArchiveId, chunkFile); } } else { - archiveFs.delete(this.archiveId, tarFile, progress); - logger.info("---------deleteFromArchiveStorage ------archiveId Deleted---- {} ", this.archiveId); + deleteContext.deleteChunkNoLocation(0, this.archiveId, tarFile); } }); } + + private TaskExecutor<DeletedChunk> getTaskExecutor(Context context) { + int noOfThreads = context.getNoChunkThreads(); + logger.debug("Number of threads: [{}]", noOfThreads); + return new TaskExecutor<>(noOfThreads, "Delete depositId[%s]jobId[%s]numberOfChunks[%s] failed.".formatted(depositId, jobID, numOfChunks)); + } + + public record ArchiveContext(String archiveStoreId, ArchiveStore archiveStore) { + + public ArchiveContext { + Assert.isTrue(archiveStoreId != null, "archiveStoreId cannot be null"); + Assert.isTrue(archiveStore != null, "archiveStore cannot be null"); + } + + public ArchiveContext(Map.Entry<String, ArchiveStore> entry) { + this(entry.getKey(), entry.getValue()); + } + + public Device getDevice() { + if (!(archiveStore instanceof Device device)) { + throw new IllegalStateException("ArchiveContext [%s] is not a Device".formatted(archiveStore.getClass().getSimpleName())); + } + return device; + } + + public boolean hasMultipleCopies() { + return Boolean.TRUE.equals(getDevice().hasMultipleCopies()); + } + + public List<String> getLocations() { + return getDevice().getLocations(); + } + } + + @AllArgsConstructor + class DeleteContext { + private final ArchiveContext archiveContext; + private final Progress progress; + private final TaskExecutor<DeletedChunk> taskExecutor; + + private DeletedChunk sendDeletedChunkEvent(int chunkNumber, String location) { + DeletedChunk event = new DeletedChunk(jobID, depositId, chunkNumber, numOfChunks, archiveContext.archiveStore().getClass(), archiveContext.archiveStoreId, location); + event.setUserId(userID); + event.setArchiveId(archiveId); + if (sendDeletedChunkEvents) { + logger.info("SENDING {}", event); + eventSender.send(event); + } else { + logger.info("NOT SENDING {}", event); + } + return event; + } + + // this ensures any Exception will always be recorded with the chunkNumber + private void addDeleteFileTask(int chunkNum, String location, Callable<DeletedChunk> task) { + taskExecutor.add(() -> { + try { + return task.call(); + } catch (Exception ex) { + throw new DeleteFileException(archiveContext, location, chunkNum, ex); + } + }); + } + + void deleteChunkNoLocation(int chunkNum, String chunkArchiveId, File chunkFile) { + addDeleteFileTask(chunkNum, NO_LOCATION, () -> { + Device archiveFs = archiveContext.getDevice(); + archiveFs.delete(chunkArchiveId, chunkFile, progress); + return sendDeletedChunkEvent(chunkNum, NO_LOCATION); + }); + } + + void deleteChunkWithLocation(int chunkNum, String location, String chunkArchiveId, File chunkFile) { + addDeleteFileTask(chunkNum, location, () -> { + Device archiveFs = archiveContext.getDevice(); + archiveFs.delete(chunkArchiveId, chunkFile, progress, location); + return sendDeletedChunkEvent(chunkNum, location); + }); + } + } } \ No newline at end of file
diff --git a/datavault-worker/src/main/java/org/datavaultplatform/worker/tasks/DeleteFileException.java b/datavault-worker/src/main/java/org/datavaultplatform/worker/tasks/DeleteFileException.java new file mode 100644 index 000000000..d9369ba9f --- /dev/null +++ b/datavault-worker/src/main/java/org/datavaultplatform/worker/tasks/DeleteFileException.java @@ -0,0 +1,26 @@ +package org.datavaultplatform.worker.tasks; + +import lombok.Getter; +import org.datavaultplatform.common.storage.ArchiveStore; + +@Getter +public class DeleteFileException extends Exception { + private final int chunkNumber; + private final String archiveStoreId; + private final String location; + + public DeleteFileException(Class<? extends ArchiveStore> archiveStoreClass, String archiveStoreId, String location, int chunkNumber, Exception ex) { + super("ArchiveStore[%s/%s]Location[%s]ChunkNum[%d]Cause[%s]".formatted(archiveStoreClass.getSimpleName(), archiveStoreId, location, chunkNumber, getCause(ex)), ex); + this.chunkNumber = chunkNumber; + this.archiveStoreId = archiveStoreId; + this.location = location; + } + + public DeleteFileException(Delete.ArchiveContext archiveContext, String location, int chunkNumber, Exception ex) { + this(archiveContext.archiveStore().getClass(), archiveContext.archiveStoreId(), location, chunkNumber, ex); + } + + private static String getCause(Exception ex) { + return "%s/%s".formatted(ex.getClass().getName(), ex.getMessage()); + } +}
diff --git a/datavault-worker/src/main/java/org/datavaultplatform/worker/tasks/delete/DeleteState.java b/datavault-worker/src/main/java/org/datavaultplatform/worker/tasks/delete/DeleteState.java new file mode 100644 index 000000000..da0b74f0b --- /dev/null +++ b/datavault-worker/src/main/java/org/datavaultplatform/worker/tasks/delete/DeleteState.java @@ -0,0 +1,28 @@ +package org.datavaultplatform.worker.tasks.delete; + +import lombok.Getter; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.stream.Collectors; + +@Getter +public enum DeleteState { + + DeleteState00DeleteStart(0, "Deleting from archive"), + DeleteState01DeleteComplete(1, "Delete complete"); + + private final int stateNumber; + private final String description; + + DeleteState(int stateNumber, String description) { + this.stateNumber = stateNumber; + this.description = description; + } + + public static ArrayList<String> getDeleteStates() { + return Arrays.stream(values()) + .map(DeleteState::getDescription) + .collect(Collectors.toCollection(ArrayList::new)); + } +}
diff --git a/datavault-worker/src/main/java/org/datavaultplatform/worker/utils/DepositEvents.java b/datavault-worker/src/main/java/org/datavaultplatform/worker/utils/DepositEvents.java index 1008bb102..bcbd9d29a 100644 --- a/datavault-worker/src/main/java/org/datavaultplatform/worker/utils/DepositEvents.java +++ b/datavault-worker/src/main/java/org/datavaultplatform/worker/utils/DepositEvents.java @@ -26,6 +26,7 @@ import org.datavaultplatform.common.storage.Verify; import org.datavaultplatform.worker.tasks.Audit; import org.datavaultplatform.common.util.DateTimeUtils; +import org.datavaultplatform.worker.tasks.Delete; import org.datavaultplatform.worker.tasks.Deposit; import org.datavaultplatform.worker.tasks.Retrieve; import org.springframework.util.Assert; @@ -71,7 +72,7 @@ public String
generateRetrieveMessage(File retrieveBaseDir, String retrievePath) retrieve.setProperties(topLevelProps); retrieve.setTarIV(info.tarIV); - retrieve.setArchiveFileStores(List.of(getArchiveStoreForRetrieve())); + retrieve.setArchiveFileStores(getArchiveStoresForRetrieve()); //CHUNKS retrieve.setChunkFilesDigest(info.chunkDigests); retrieve.setEncChunksDigest(info.chunkEncDigests); @@ -94,6 +95,63 @@ public String generateRetrieveMessage(File retrieveBaseDir, String retrievePath) return result; } + @SneakyThrows + public String generateDeleteMessage() { + + TaskInfo info = getTaskInfo(null, null); + + Delete delete = new Delete(); + delete.setJobID(UUID.randomUUID().toString()); + delete.setTaskClass(Delete.class.getName()); + Map topLevelProps = new HashMap<>(); + topLevelProps.put(PropNames.DEPOSIT_ID, "test-deposit-id"); + topLevelProps.put(PropNames.ARCHIVE_DIGEST_ALGORITHM, Verify.SHA_1_ALGORITHM); + topLevelProps.put(PropNames.BAG_ID, info.bagitId); + topLevelProps.put(PropNames.NUM_OF_CHUNKS, String.valueOf(info.numChunks)); + topLevelProps.put(PropNames.ARCHIVE_SIZE, String.valueOf(info.archiveSize)); + topLevelProps.put(PropNames.ARCHIVE_DIGEST, info.archiveDigest); + topLevelProps.put(PropNames.ARCHIVE_ID, info.archiveId); + topLevelProps.put(PropNames.DEPOSIT_CREATION_DATE, "20240111"); + topLevelProps.put(PropNames.USER_FS_RETRY_MAX_ATTEMPTS, "10"); + topLevelProps.put(PropNames.USER_FS_RETRY_DELAY_MS_1, "60000"); + topLevelProps.put(PropNames.USER_FS_RETRY_DELAY_MS_2, "300000"); + topLevelProps.put(PropNames.WORKERS_SEND_DELETED_CHUNK_EVENTS, "true"); + Instant testInstant = LocalDate.of(2024, 1, 11) + .atStartOfDay(ZoneOffset.UTC) + .toInstant(); + Date testDate = Date.from(testInstant); + topLevelProps.put(PropNames.DEPOSIT_CREATION_DATE, DateTimeUtils.formatDateBasicISO(testDate)); + + delete.setProperties(topLevelProps); + delete.setTarIV(info.tarIV); + delete.setArchiveFileStores(getArchiveStoresForRetrieve()); + //TEMP? - trying to fix problem + for(var as : delete.getArchiveFileStores()){ + topLevelProps.put(as.getID(), info.archiveId); + } + //TEMP? 
- trying to fix problem + + //CHUNKS + delete.setChunkFilesDigest(info.chunkDigests); + delete.setEncChunksDigest(info.chunkEncDigests); + delete.setChunksIVs(info.chunkIVsAsBytes); + + delete.setIsRedeliver(false); + Map userFileStoreClasses = new HashMap<>(); + + Map propsInner = new HashMap<>(); + propsInner.put(PropNames.ROOT_PATH, null); + Map> propsOuter = new HashMap<>(); + propsOuter.put(FILE_STORE_SRC_ID, propsInner); + delete.setUserFileStoreProperties(propsOuter); + userFileStoreClasses.put(FILE_STORE_SRC_ID, StorageConstants.LOCAL_FILE_SYSTEM); + delete.setUserFileStoreClasses(userFileStoreClasses); + + ObjectMapper mapper = new ObjectMapper(); + mapper.setSerializationInclusion(Include.NON_NULL); + String result = mapper.writerWithDefaultPrettyPrinter().writeValueAsString(delete); + return result; + } /** * By using multiple DepositEvents - we can audit multiple deposits * @param allDepositEvents - information about the deposits to audit @@ -114,7 +172,7 @@ public static String generateAuditMessage(List allDepositEvents) Map topLevelProps = new HashMap<>(); topLevelProps.put(PropNames.AUDIT_ID, "test-audit-id"); audit.setProperties(topLevelProps); - audit.setArchiveFileStores(Collections.singletonList(depositEvents1.getArchiveStoreForRetrieve())); + audit.setArchiveFileStores(depositEvents1.getArchiveStoresForRetrieve()); audit.setIsRedeliver(false); TaskInfo info = depositEvents1.getTaskInfo(null, null); @@ -159,19 +217,16 @@ public static String generateAuditMessage(List allDepositEvents) ObjectMapper mapper = new ObjectMapper(); mapper.setSerializationInclusion(Include.NON_NULL); String result = mapper.writerWithDefaultPrettyPrinter().writeValueAsString(audit); - System.out.println(result); - - return result; } - @SuppressWarnings("UnnecessaryLocalVariable") - public ArchiveStore getArchiveStoreForRetrieve() { - ArchiveStore result = deposit.getArchiveFileStores().stream() - .filter(ArchiveStore::isRetrieveEnabled) - .findFirst() - .orElse(null); + public List getArchiveStoresForRetrieve() { + List result = deposit.getArchiveFileStores().stream() + .filter(Objects::nonNull) + .filter(ArchiveStore::isRetrieveEnabled) + .toList(); + Assert.isTrue(!result.isEmpty(), "must have at least 1 ArchiveStore"); return result; } @@ -213,9 +268,9 @@ public TaskInfo getTaskInfo(File retrieveBaseDir, String retrievePath) { long archiveSize = getComplete().getArchiveSize(); - String archiveDigest = getComputedDigest().getDigest(); - - String archiveId = getUploadComplete().getArchiveIds().get(getArchiveStoreForRetrieve().getID()); + ArchiveStore firstArchiveStore = getArchiveStoresForRetrieve().get(0); + String archiveId = getUploadComplete().getArchiveIds().get(firstArchiveStore.getID()); + Assert.notNull(archiveId, String.format("Cannot find archiveId for archiveStoreId[%s]", firstArchiveStore.getID())); Map chunkIVs = new HashMap<>(); Map chunkIVsAsBytes = new HashMap<>(); @@ -239,7 +294,15 @@ public TaskInfo getTaskInfo(File retrieveBaseDir, String retrievePath) { computedEncryption.getEncChunkDigests().get(chunkNumber)); } - String rootPathArchiveStore = getArchiveStoreForRetrieve().getProperties().get(PropNames.ROOT_PATH); + List rootPathArchiveStores = getArchiveStoresForRetrieve().stream() + .map(ArchiveStore::getProperties) + .map(hm -> hm.get(PropNames.ROOT_PATH)) + .filter(Objects::nonNull) + .toList(); + + if (rootPathArchiveStores.isEmpty()) { + log.warn("empty list of rootPathArchiveStores"); + } String rootPathRetrieve = null; if (retrieveBaseDir != null) { rootPathRetrieve 
= retrieveBaseDir.getCanonicalPath(); @@ -249,7 +312,7 @@ public TaskInfo getTaskInfo(File retrieveBaseDir, String retrievePath) { .numChunks(numChunks) .archiveSize(archiveSize) .archiveId(archiveId) - .rootPathArchiveStore(rootPathArchiveStore) + .rootPathArchiveStores(rootPathArchiveStores) .rootPathRetrieve(rootPathRetrieve) .retrievePath(retrievePath) .tarIV(tarIV) @@ -279,7 +342,7 @@ public static class TaskInfo { private final Map chunkIVs; private final Map chunkIVsAsBytes; - private final String rootPathArchiveStore; + private final List rootPathArchiveStores; private final String rootPathRetrieve; private final String retrievePath; @@ -309,19 +372,6 @@ public List getChunkInfo() { Collections.sort(result); return result; } - - private HashMap zeroBased(Map source){ - HashMap result = new HashMap<>(); - - source.entrySet().forEach(entry -> { - Integer key = entry.getKey(); - T value = entry.getValue(); - result.put(key - 1, value); - }); - - return result; - } - } private static String base64Encode(byte[] data) { @@ -333,7 +383,7 @@ private static String base64Encode(byte[] data) { public static class ChunkInfo implements Comparable { private final int chunkNum; private final String bagitId; - private final String archiveId; + private final String archiveId; // there is only archiveId - even if we have multiple archive stores. private final String chunkDigest; private final String chunkEncDigest; private final String chunkIVAsString; diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BaseDepositIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BaseDepositIT.java index cce4a8de4..a6deb9ab8 100644 --- a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BaseDepositIT.java +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BaseDepositIT.java @@ -14,6 +14,7 @@ import org.datavaultplatform.common.event.deposit.ComputedDigest; import org.datavaultplatform.common.event.deposit.ComputedEncryption; import org.datavaultplatform.common.storage.Verify; +import org.datavaultplatform.common.storage.impl.LocalFileSystem; import org.datavaultplatform.common.task.Context.AESMode; import org.datavaultplatform.common.util.TestUtils; import org.datavaultplatform.worker.rabbit.BaseRabbitIT; @@ -47,6 +48,7 @@ import java.time.Duration; import java.util.*; import java.util.concurrent.Callable; +import java.util.regex.Pattern; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.*; @@ -116,6 +118,8 @@ static Set getPathsWithinTarFile(File tarFile) { @SneakyThrows static void setupProperties(DynamicPropertyRegistry registry) { File baseTemp = Files.createTempDirectory("test").toFile(); + FileUtils.cleanDirectory(baseTemp); + File tempDir = new File(baseTemp, "temp"); assertTrue(tempDir.mkdir()); @@ -173,19 +177,25 @@ final void setupDirectoriesAndFiles() { assertThat(baseTemp).exists(); assertThat(baseTemp).isDirectory(); - sourceDir = baseTemp.resolve("source").toFile(); + sourceDir = baseTemp.resolve("user-from").toFile(); assertTrue(sourceDir.mkdir()); - destDir = baseTemp.resolve("dest").toFile(); - assertTrue(destDir.mkdir()); - retrieveBaseDir = baseTemp.resolve("retrieve").toFile(); + retrieveBaseDir = baseTemp.resolve("user-to").toFile(); assertTrue(retrieveBaseDir.mkdir()); retrieveDir = retrieveBaseDir.toPath().resolve("ret-folder").toFile(); assertTrue(retrieveDir.mkdir()); log.info("meta.dir [{}]", metaDir); log.info("temp.dir [{}]", tempDir); 
log.info("source dir [{}]", sourceDir); + setupDestDirs(); + } + @SneakyThrows + void setupDestDirs() { + assertThat(this.destDir).isNull(); + Path baseTemp = Paths.get(this.tempDir); + destDir = baseTemp.resolve("worker-store").toFile(); + assertTrue(destDir.mkdir()); log.info("dest dir [{}]", destDir); - + assertThat(this.destDir).exists(); } final void setupSourceDirectory(String srcPath) throws Exception{ @@ -245,18 +255,18 @@ final void waitUntil(Callable test) { abstract Optional getExpectedNumberChunksPerDeposit(); - final void checkDepositWorkedOkay(String depositMessage, DepositEvents depositEvents){ - checkDepositWorkedOkay(SRC_PATH_DEFAULT, depositMessage, depositEvents); + void checkDepositWorkedOkay(String depositMessage, DepositEvents depositEvents){ + checkDepositWorkedOkayInternal(destDir, SRC_PATH_DEFAULT, depositMessage, depositEvents); } @SneakyThrows - final void checkDepositWorkedOkay(String srcPath, String depositMessage, DepositEvents depositEvents) { + final void checkDepositWorkedOkayInternal(File destDirectory, String srcPath, String depositMessage, DepositEvents depositEvents) { Deposit deposit = mapper.readValue(depositMessage, Deposit.class); String bagId = deposit.getProperties().get("bagId"); log.info("BROKER MSG COUNT {}", events.size()); - String[] depositFileNames = destDir.list((dir, name) -> dir.equals(destDir) && name.startsWith(bagId)); - File[] destFiles = Arrays.stream(depositFileNames).map(fn -> new File(destDir, fn)).sorted().toArray(File[]::new); + String[] depositFileNames = destDirectory.list((dir, name) -> dir.equals(destDirectory) && name.startsWith(bagId)); + File[] destFiles = Arrays.stream(depositFileNames).map(fn -> new File(destDirectory, fn)).sorted(new ChunkFileComparator()).toArray(File[]::new); int expectedNumDepositFiles = getExpectedNumberChunksPerDeposit().isPresent() ? getExpectedNumberChunksPerDeposit().get() : 1; assertEquals(expectedNumDepositFiles, depositFileNames.length); @@ -277,7 +287,7 @@ final void checkDepositWorkedOkay(String srcPath, String depositMessage, Deposit int expectedNumberChunks = getExpectedNumberChunksPerDeposit().get(); for (int chunkNum = 1; chunkNum <= expectedNumberChunks; chunkNum++) { - File expectedEncChunk = destDir.toPath().resolve(bagId + ".tar." + chunkNum).toFile(); + File expectedEncChunk = destDirectory.toPath().resolve(bagId + ".tar." 
+ chunkNum).toFile(); assertEquals(expectedEncChunk, destFiles[chunkNum - 1]); chunkNumToEncChunk.put(chunkNum, expectedEncChunk); } @@ -297,7 +307,7 @@ final void checkDepositWorkedOkay(String srcPath, String depositMessage, Deposit FileUtils.copyFile(expectedEncChunk, decryptedChunkFile); Encryption.decryptFile(aesMode, decryptedChunkFile, iv); assertTrue(decryptedChunkFile.length() > 0); - assertTrue(decryptedChunkFile.length() != expectedEncChunk.length()); + assertNotEquals(decryptedChunkFile.length(), expectedEncChunk.length()); } decryptedTarFile = Files.createTempFile("decryptedTar", ".plain").toFile(); @@ -310,7 +320,7 @@ final void checkDepositWorkedOkay(String srcPath, String depositMessage, Deposit } } else { - File expectedEncTar = destDir.toPath().resolve(bagId + ".tar").toFile(); + File expectedEncTar = destDirectory.toPath().resolve(bagId + ".tar").toFile(); assertEquals(expectedEncTar, destFiles[0]); String encTarHash = computedEncryption.getEncTarDigest(); @@ -322,7 +332,7 @@ final void checkDepositWorkedOkay(String srcPath, String depositMessage, Deposit FileUtils.copyFile(destFiles[0], decryptedTarFile); Encryption.decryptFile(aesMode, decryptedTarFile, iv); assertTrue(decryptedTarFile.length() > 0); - assertTrue(decryptedTarFile.length() != expectedEncTar.length()); + assertNotEquals(decryptedTarFile.length(), expectedEncTar.length()); } Set tarEntryPaths = getPathsWithinTarFile(decryptedTarFile); @@ -348,18 +358,28 @@ final String getSampleDepositMessage() { return getSampleDepositMessage(SRC_PATH_DEFAULT, BAG_ID_DEFAULT); } + @SneakyThrows + String getArchiveStoreRootPath() { + return destDir.getCanonicalPath(); + } + + String getArchiveStoreClassName() { + return LocalFileSystem.class.getName(); + } + @SuppressWarnings("UnnecessaryLocalVariable") @SneakyThrows - final String getSampleDepositMessage(String srcPath, String bagId) { + String getSampleDepositMessage(String srcPath, String bagId) { String temp1 = FileUtils.readFileToString(this.depositMessage.getFile(), StandardCharsets.UTF_8); String temp2 = temp1.replaceAll("/tmp/dv/src", sourceDir.getCanonicalPath()); - String temp3 = temp2.replaceAll("/tmp/dv/dest", destDir.getCanonicalPath()); + String temp3 = temp2.replaceAll("/tmp/dv/dest", getArchiveStoreRootPath()); String temp4 = temp3.replaceAll("src-path-1", srcPath); String temp5 = temp4.replaceAll("bf73a7f5-42d1-4c3f-864a-a171af8373d4", bagId); - return temp5; + String temp6 = temp5.replaceFirst(Pattern.quote(LocalFileSystem.class.getName()), getArchiveStoreClassName()); + return temp6; } - + @RabbitListener(queues = BaseQueueConfig.BROKER_QUEUE_NAME) @SneakyThrows final void receiveBrokerMessage(Message message, Channel channel, diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BaseDepositRestartIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BaseDepositRestartIT.java index 1a48212e7..6693c020f 100644 --- a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BaseDepositRestartIT.java +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BaseDepositRestartIT.java @@ -57,10 +57,8 @@ public abstract class BaseDepositRestartIT extends BaseRabbitIT { static final String SRC_PATH_1 = "src-path-a"; - static final String SRC_PATH_2 = "src-path-b"; static final String SRC_PATH_DEFAULT = SRC_PATH_1; static final String BAG_ID_1 = "d87ca007-9cee-4c49-8169-f74c2b90b773"; - static final String BAG_ID_2 = "bf73a7f5-42d1-4c3f-864a-a171af8373d4"; static final String BAG_ID_DEFAULT = 
BAG_ID_1; static final String KEY_NAME_FOR_SSH = "key-name-for-ssh"; @@ -303,7 +301,7 @@ final void checkDepositWorkedOkay(String srcPath, String depositMessage, Deposit FileUtils.copyFile(expectedEncChunk, decryptedChunkFile); Encryption.decryptFile(aesMode, decryptedChunkFile, iv); assertTrue(decryptedChunkFile.length() > 0); - assertTrue(decryptedChunkFile.length() != expectedEncChunk.length()); + assertNotEquals(decryptedChunkFile.length(), expectedEncChunk.length()); } decryptedTarFile = Files.createTempFile("decryptedTar", ".plain").toFile(); @@ -328,7 +326,7 @@ final void checkDepositWorkedOkay(String srcPath, String depositMessage, Deposit FileUtils.copyFile(destFiles[0], decryptedTarFile); Encryption.decryptFile(aesMode, decryptedTarFile, iv); assertTrue(decryptedTarFile.length() > 0); - assertTrue(decryptedTarFile.length() != expectedEncTar.length()); + assertNotEquals(decryptedTarFile.length(), expectedEncTar.length()); } Set tarEntryPaths = getPathsWithinTarFile(decryptedTarFile); @@ -399,5 +397,7 @@ boolean foundComplete() { .anyMatch(e -> e.getClass().equals(Complete.class)); } } + + } \ No newline at end of file diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BaseDepositTwoArchivesIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BaseDepositTwoArchivesIT.java new file mode 100644 index 000000000..855e25097 --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BaseDepositTwoArchivesIT.java @@ -0,0 +1,85 @@ +package org.datavaultplatform.worker.tasks; + +import lombok.SneakyThrows; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.io.FileUtils; +import org.datavaultplatform.common.storage.impl.MultiLocalFileSystem; +import org.datavaultplatform.worker.utils.DepositEvents; +import org.springframework.core.io.ClassPathResource; +import org.springframework.core.io.Resource; + +import java.io.File; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.nio.file.Paths; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@Slf4j +public abstract class BaseDepositTwoArchivesIT extends BaseDepositIT { + + File destDir1; + File destDir2; + + @SneakyThrows + @Override + void setupDestDirs() { + assertThat(this.destDir).isNull(); + assertThat(this.destDir1).isNull(); + assertThat(this.destDir2).isNull(); + Path baseTemp = Paths.get(this.tempDir); + destDir1 = baseTemp.resolve("worker-store-1").toFile(); + assertTrue(destDir1.mkdir()); + destDir2 = baseTemp.resolve("worker-store-2").toFile(); + assertTrue(destDir2.mkdir()); + log.info("dest dir1 [{}]", destDir1); + log.info("dest dir2 [{}]", destDir2); + assertThat(this.destDir).isNull(); + assertThat(this.destDir1).exists(); + assertThat(this.destDir2).exists(); + } + + @Override + void checkDepositWorkedOkay(String depositMessage, DepositEvents depositEvents){ + checkDepositWorkedOkayInternal(destDir1, SRC_PATH_DEFAULT, depositMessage, depositEvents); + checkDepositWorkedOkayInternal(destDir2, SRC_PATH_DEFAULT, depositMessage, depositEvents); + } + + @Override + @SneakyThrows + String getArchiveStoreRootPath() { + return destDir1.getCanonicalPath(); + } + @SneakyThrows + String getArchiveStoreRootPath1() { + return getArchiveStoreRootPath(); + } + @SneakyThrows + String getArchiveStoreRootPath2() { + return destDir2.getCanonicalPath(); + } + + @Override + String getArchiveStoreClassName() { + return MultiLocalFileSystem.class.getName(); + 
} + + @SuppressWarnings("UnnecessaryLocalVariable") + @SneakyThrows + @Override + String getSampleDepositMessage(String srcPath, String bagId) { + Resource depositMessageTwoArchiveStores = new ClassPathResource("sampleMessages/sampleDepositMessageTwoArchiveStores.json"); + + String temp1 = FileUtils.readFileToString(depositMessageTwoArchiveStores.getFile(), + StandardCharsets.UTF_8); + String temp2 = temp1.replaceAll("/tmp/dv/src", sourceDir.getCanonicalPath()); + String temp3 = temp2.replaceAll("/tmp/dv/dest1", getArchiveStoreRootPath1()); + String temp4 = temp3.replaceAll("/tmp/dv/dest2", getArchiveStoreRootPath2()); + String temp5 = temp4.replaceAll("src-path-1", srcPath); + String temp6 = temp5.replaceAll("bf73a7f5-42d1-4c3f-864a-a171af8373d4", bagId); + + return temp6; + } + +} \ No newline at end of file diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BaseDepositTwoLocationsIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BaseDepositTwoLocationsIT.java new file mode 100644 index 000000000..b7daaac81 --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BaseDepositTwoLocationsIT.java @@ -0,0 +1,57 @@ +package org.datavaultplatform.worker.tasks; + +import lombok.SneakyThrows; +import lombok.extern.slf4j.Slf4j; +import org.datavaultplatform.common.storage.impl.MultiLocalFileSystem; +import org.datavaultplatform.worker.utils.DepositEvents; +import org.junit.jupiter.api.Order; + +import java.io.File; +import java.nio.file.Path; +import java.nio.file.Paths; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.*; + +@Slf4j +public abstract class BaseDepositTwoLocationsIT extends BaseDepositIT { + + File destDir1; + File destDir2; + + @SneakyThrows + @Override + void setupDestDirs() { + assertThat(this.destDir).isNull(); + assertThat(this.destDir1).isNull(); + assertThat(this.destDir2).isNull(); + Path baseTemp = Paths.get(this.tempDir); + destDir1 = baseTemp.resolve("worker-store-1").toFile(); + assertTrue(destDir1.mkdir()); + destDir2 = baseTemp.resolve("worker-store-2").toFile(); + assertTrue(destDir2.mkdir()); + log.info("dest dir1 [{}]", destDir1); + log.info("dest dir2 [{}]", destDir2); + assertThat(this.destDir).isNull(); + assertThat(this.destDir1).exists(); + assertThat(this.destDir2).exists(); + } + + @Override + void checkDepositWorkedOkay(String depositMessage, DepositEvents depositEvents){ + checkDepositWorkedOkayInternal(destDir1, SRC_PATH_DEFAULT, depositMessage, depositEvents); + checkDepositWorkedOkayInternal(destDir2, SRC_PATH_DEFAULT, depositMessage, depositEvents); + } + + @Override + @SneakyThrows + String getArchiveStoreRootPath() { + return destDir1.getCanonicalPath() + "," + destDir2.getCanonicalPath(); + } + + @Override + String getArchiveStoreClassName() { + return MultiLocalFileSystem.class.getName(); + } + +} \ No newline at end of file diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenDeleteIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenDeleteIT.java new file mode 100644 index 000000000..a979782b3 --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenDeleteIT.java @@ -0,0 +1,110 @@ +package org.datavaultplatform.worker.tasks; + +import com.fasterxml.jackson.databind.ObjectMapper; +import lombok.SneakyThrows; +import lombok.extern.slf4j.Slf4j; +import 
org.awaitility.core.ConditionTimeoutException; +import org.datavaultplatform.common.event.delete.DeleteComplete; +import org.datavaultplatform.common.event.deposit.CompleteCopyUpload; +import org.datavaultplatform.worker.utils.DepositEvents; +import org.junit.jupiter.api.Test; + +import java.io.File; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; + +@Slf4j +abstract class BasePerformDepositThenDeleteIT extends BaseDepositIT implements DepositThenDelete { + + @Override + void taskSpecificSetup() { + } + + abstract void checkChunkingProps(boolean chunkingEnabled, String chunkingByteSize); + + @Test + @SneakyThrows + void testDepositThenDelete() { + assertEquals(0, destDir.listFiles().length); + String depositMessage = getSampleDepositMessage(); + Deposit deposit = new ObjectMapper().readValue(depositMessage, Deposit.class); + log.info("depositMessage {}", depositMessage); + sendNormalMessage(depositMessage); + waitUntil(this::foundComplete); + + DepositEvents depositEvents = new DepositEvents(deposit, this.events); + checkDepositEvents(); + this.events.clear(); + checkDepositWorkedOkay(depositMessage, depositEvents); + + List expectedFiles = getExpectedDepositFiles(depositMessage); + for (File file : expectedFiles) { + log.info("Checking file [{}] exists and non-empty", file.getCanonicalPath()); + assertThat(file).exists(); + assertThat(file).isNotEmpty(); + } + + buildAndSendDeleteMessage(depositEvents); + + try { + waitUntil(this::foundDeleteComplete); + } catch (ConditionTimeoutException ex) { + log.error("Timed Out waiting for DeleteComplete", ex); + } + checkDeleteEvents(this.events); + + for (File file : expectedFiles) { + log.info("Checking file [{}] has been deleted", file.getCanonicalPath()); + assertThat(file).doesNotExist(); + } + + } + + @SneakyThrows + private List getExpectedDepositFiles(String depositMessage) { + Deposit deposit = mapper.readValue(depositMessage, Deposit.class); + String bagId = deposit.getProperties().get("bagId"); + + // create path from this.destDir + Path baseDest = this.destDir.toPath(); + + List files = new ArrayList<>(); + if (chunkingEnabled) { + int numberOfChunks = getExpectedNumberChunksPerDeposit().orElseThrow(); + for (int chunkNum = 1; chunkNum <= numberOfChunks; chunkNum++) { + File chunkTarFile = baseDest.resolve(bagId + ".tar." 
+ chunkNum).toFile(); + files.add(chunkTarFile); + } + } else { + File singleTarFile = baseDest.resolve(bagId + ".tar").toFile(); + files.add(singleTarFile); + } + return files; + } + + public List getCopyUploadCompleteEvents(){ + return events.stream() + .filter(e -> e.getClass().equals(CompleteCopyUpload.class)) + .map(CompleteCopyUpload.class::cast) + .toList(); + } + + boolean foundDeleteComplete() { + return events.stream() + .anyMatch(e -> e.getClass().equals(DeleteComplete.class)); + } + + @SneakyThrows + private void buildAndSendDeleteMessage(DepositEvents depositEvents) { + String deleteMessage = depositEvents.generateDeleteMessage(); + sendNormalMessage(deleteMessage); + } + + public final long getArchiveCount() { + return 1; + } +} diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenDeleteTwoArchivesIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenDeleteTwoArchivesIT.java new file mode 100644 index 000000000..4f67e6d06 --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenDeleteTwoArchivesIT.java @@ -0,0 +1,122 @@ +package org.datavaultplatform.worker.tasks; + +import com.fasterxml.jackson.databind.ObjectMapper; +import lombok.SneakyThrows; +import lombok.extern.slf4j.Slf4j; +import org.awaitility.core.ConditionTimeoutException; +import org.datavaultplatform.common.event.delete.DeleteComplete; +import org.datavaultplatform.common.event.deposit.CompleteCopyUpload; +import org.datavaultplatform.common.storage.impl.LocalFileSystem; +import org.datavaultplatform.worker.utils.DepositEvents; +import org.junit.jupiter.api.Test; + +import java.io.File; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; + +@Slf4j +abstract class BasePerformDepositThenDeleteTwoArchivesIT extends BaseDepositTwoArchivesIT implements DepositThenDelete { + + @Override + void taskSpecificSetup() { + } + + abstract void checkChunkingProps(boolean chunkingEnabled, String chunkingByteSize); + + @Test + @SneakyThrows + void testDepositThenDelete() { + assertEquals(0, destDir1.listFiles().length); + assertEquals(0, destDir2.listFiles().length); + String depositMessage = getSampleDepositMessage(); + Deposit deposit = new ObjectMapper().readValue(depositMessage, Deposit.class); + log.info("depositMessage {}", depositMessage); + sendNormalMessage(depositMessage); + waitUntil(this::foundComplete); + + DepositEvents depositEvents = new DepositEvents(deposit, this.events); + checkDepositEvents(); + this.events.clear(); + + checkDepositWorkedOkay(depositMessage, depositEvents); + + List expectedFiles = getExpectedDepositFiles(depositMessage); + for (File file : expectedFiles) { + log.info("Checking file [{}] exists and non-empty", file.getCanonicalPath()); + assertThat(file).exists(); + assertThat(file).isNotEmpty(); + } + + buildAndSendDeleteMessage(depositEvents); + + try { + waitUntil(this::foundDeleteComplete); + } catch (ConditionTimeoutException ex) { + log.error("Timed Out waiting for DeleteComplete", ex); + } + + checkDeleteEvents(this.events); + + for (File file : expectedFiles) { + log.info("Checking file [{}] has been deleted", file.getCanonicalPath()); + assertThat(file).doesNotExist(); + } + + } + + @SneakyThrows + private List getExpectedDepositFiles(String depositMessage) { + Deposit deposit = 
mapper.readValue(depositMessage, Deposit.class); + String bagId = deposit.getProperties().get("bagId"); + + List baseDestinations = List.of(this.destDir1.toPath(), this.destDir2.toPath()); + List files = new ArrayList<>(); + for (Path baseDestination : baseDestinations) { + + if (chunkingEnabled) { + int numberOfChunks = getExpectedNumberChunksPerDeposit().orElseThrow(); + for (int chunkNum = 1; chunkNum <= numberOfChunks; chunkNum++) { + File chunkTarFile = baseDestination.resolve(bagId + ".tar." + chunkNum).toFile(); + files.add(chunkTarFile); + } + } else { + File singleTarFile = baseDestination.resolve(bagId + ".tar").toFile(); + files.add(singleTarFile); + } + } + return files; + } + + @Override + public List getArchiveStoreInformation() { + return List.of( + new ArchiveStoreInformation(LocalFileSystem.class,"ARCHIVE-STORE-DST-ID-1"), + new ArchiveStoreInformation(LocalFileSystem.class,"ARCHIVE-STORE-DST-ID-2")); + } + + public List getCopyUploadCompleteEvents(){ + return events.stream() + .filter(e -> e.getClass().equals(CompleteCopyUpload.class)) + .map(CompleteCopyUpload.class::cast) + .toList(); + } + + boolean foundDeleteComplete() { + return events.stream() + .anyMatch(e -> e.getClass().equals(DeleteComplete.class)); + } + + @SneakyThrows + private void buildAndSendDeleteMessage(DepositEvents depositEvents) { + String deleteMessage = depositEvents.generateDeleteMessage(); + sendNormalMessage(deleteMessage); + } + + public final long getArchiveCount() { + return 2; + } +} diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenDeleteTwoLocationsIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenDeleteTwoLocationsIT.java new file mode 100644 index 000000000..7e8f40744 --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenDeleteTwoLocationsIT.java @@ -0,0 +1,121 @@ +package org.datavaultplatform.worker.tasks; + +import com.fasterxml.jackson.databind.ObjectMapper; +import lombok.SneakyThrows; +import lombok.extern.slf4j.Slf4j; +import org.awaitility.core.ConditionTimeoutException; +import org.datavaultplatform.common.event.delete.DeleteComplete; +import org.datavaultplatform.common.event.deposit.CompleteCopyUpload; +import org.datavaultplatform.common.storage.impl.MultiLocalFileSystem; +import org.datavaultplatform.worker.utils.DepositEvents; +import org.junit.jupiter.api.Test; + +import java.io.File; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; + +@Slf4j +abstract class BasePerformDepositThenDeleteTwoLocationsIT extends BaseDepositTwoLocationsIT implements DepositThenDelete { + + @Override + void taskSpecificSetup() { + } + + abstract void checkChunkingProps(boolean chunkingEnabled, String chunkingByteSize); + + @Test + @SneakyThrows + void testDepositThenDelete() { + assertEquals(0, destDir1.listFiles().length); + assertEquals(0, destDir2.listFiles().length); + String depositMessage = getSampleDepositMessage(); + Deposit deposit = new ObjectMapper().readValue(depositMessage, Deposit.class); + log.info("depositMessage {}", depositMessage); + sendNormalMessage(depositMessage); + waitUntil(this::foundComplete); + + DepositEvents depositEvents = new DepositEvents(deposit, this.events); + checkDepositEvents(); + this.events.clear(); + + checkDepositWorkedOkay(depositMessage, 
depositEvents); + + List expectedFiles = getExpectedDepositFiles(depositMessage); + for (File file : expectedFiles) { + log.info("Checking file [{}] exists and non-empty", file.getCanonicalPath()); + assertThat(file).exists(); + assertThat(file).isNotEmpty(); + } + + buildAndSendDeleteMessage(depositEvents); + + try { + waitUntil(this::foundDeleteComplete); + } catch (ConditionTimeoutException ex) { + log.error("Timed Out waiting for DeleteComplete", ex); + } + + checkDeleteEvents(this.events); + + for (File file : expectedFiles) { + log.info("Checking file [{}] has been deleted", file.getCanonicalPath()); + assertThat(file).doesNotExist(); + } + } + + @SneakyThrows + private List getExpectedDepositFiles(String depositMessage) { + Deposit deposit = mapper.readValue(depositMessage, Deposit.class); + String bagId = deposit.getProperties().get("bagId"); + + List baseDestinations = List.of(this.destDir1.toPath(), this.destDir2.toPath()); + List files = new ArrayList<>(); + for (Path baseDestination : baseDestinations) { + + if (chunkingEnabled) { + int numberOfChunks = getExpectedNumberChunksPerDeposit().orElseThrow(); + for (int chunkNum = 1; chunkNum <= numberOfChunks; chunkNum++) { + File chunkTarFile = baseDestination.resolve(bagId + ".tar." + chunkNum).toFile(); + files.add(chunkTarFile); + } + } else { + File singleTarFile = baseDestination.resolve(bagId + ".tar").toFile(); + files.add(singleTarFile); + } + } + return files; + } + + @Override + public List getArchiveStoreInformation() { + return List.of( + new ArchiveStoreInformation(MultiLocalFileSystem.class, "ARCHIVE-STORE-DST-ID", List.of(destDir1.getAbsolutePath(), destDir2.getAbsolutePath())) + ); + } + + public List getCopyUploadCompleteEvents(){ + return events.stream() + .filter(e -> e.getClass().equals(CompleteCopyUpload.class)) + .map(CompleteCopyUpload.class::cast) + .toList(); + } + + boolean foundDeleteComplete() { + return events.stream() + .anyMatch(e -> e.getClass().equals(DeleteComplete.class)); + } + + @SneakyThrows + private void buildAndSendDeleteMessage(DepositEvents depositEvents) { + String deleteMessage = depositEvents.generateDeleteMessage(); + sendNormalMessage(deleteMessage); + } + + public final long getArchiveCount() { + return 1L; + } +} diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenRestartThenRetrieveIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenRestartThenRetrieveIT.java index 49933f5af..d8d86524c 100644 --- a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenRestartThenRetrieveIT.java +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenRestartThenRetrieveIT.java @@ -17,9 +17,9 @@ import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.mock.mockito.MockBean; import org.springframework.test.context.DynamicPropertyRegistry; import org.springframework.test.context.DynamicPropertySource; +import org.springframework.test.context.bean.override.mockito.MockitoBean; import org.springframework.util.Assert; import java.io.File; @@ -41,7 +41,7 @@ public abstract class BasePerformDepositThenRestartThenRetrieveIT extends BaseDe @Autowired RabbitMessageSelectorScheduler scheduler; - @MockBean + @MockitoBean TaskStageEventListener taskStageEventListener; List taskStageEvents; @@ -115,7 +115,7 @@ void 
testDepositThenRestartThenRetrieve() { log.info("-----------------------------------------------------"); log.info("ITERATION[{}][{}]-------------------------------------------", ++count, interruptAtEventClass.getSimpleName()); log.info("-----------------------------------------------------"); - scheduler.setChecker(new TaskInterrupter.Checker(event -> interruptAtEventClass.getName().equals(event.getClass().getName()), interruptAtEventClass.getSimpleName())); + scheduler.setChecker(new TaskInterrupter.Checker(interruptAtEventClass::isInstance, interruptAtEventClass.getSimpleName())); Event lastEvent = nextLastEvent; depositMessage = mapper.writeValueAsString(deposit); log.info("depositMessage {}", depositMessage); @@ -235,6 +235,7 @@ boolean foundEvent(Class eventClass) { .anyMatch(e -> e.getClass().equals(eventClass)); } + @Override boolean foundComplete() { return foundEvent(Complete.class); } diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenRetrieveIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenRetrieveIT.java index 8b3e3dc33..5fb80a61e 100644 --- a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenRetrieveIT.java +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenRetrieveIT.java @@ -3,8 +3,6 @@ import com.fasterxml.jackson.databind.ObjectMapper; import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.compress.archivers.tar.TarArchiveEntry; -import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; import org.datavaultplatform.common.event.deposit.Complete; import org.datavaultplatform.common.event.deposit.CompleteCopyUpload; @@ -16,15 +14,12 @@ import org.springframework.test.context.DynamicPropertySource; import java.io.File; -import java.io.FileInputStream; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.HashSet; import java.util.List; import java.util.Optional; -import java.util.Set; import static org.assertj.core.api.Assertions.assertThat; import static org.datavaultplatform.worker.tasks.retrieve.RetrieveUtils.DATA_VAULT_HIDDEN_FILE_NAME; @@ -37,7 +32,7 @@ public abstract class BasePerformDepositThenRetrieveIT extends BaseDepositIT { @Override void taskSpecificSetup() throws IOException { Path baseTemp = Paths.get(this.tempDir); - retrieveBaseDir = baseTemp.resolve("retrieve").toFile(); + retrieveBaseDir = baseTemp.resolve("user-to").toFile(); retrieveDir = retrieveBaseDir.toPath().resolve("ret-folder").toFile(); Files.createDirectories(retrieveBaseDir.toPath()); Files.createDirectories(retrieveDir.toPath()); @@ -125,6 +120,7 @@ boolean foundRetrieveComplete() { .anyMatch(e -> e.getClass().equals(RetrieveComplete.class)); } + @Override boolean foundComplete() { return events.stream() .anyMatch(e -> e.getClass().equals(Complete.class)); diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenRetrieveThenRestartIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenRetrieveThenRestartIT.java index 133df48b7..fb6048de4 100644 --- a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenRetrieveThenRestartIT.java +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/BasePerformDepositThenRetrieveThenRestartIT.java @@ -144,6 +144,7 @@ boolean 
foundEvent(Class eventClass) { .anyMatch(e -> e.getClass().equals(eventClass)); } + @Override boolean foundComplete() { return foundEvent(Complete.class); } @@ -175,7 +176,7 @@ private void buildAndSendRetrieveMessages(DepositEvents depositEvents) { log.info("ITERATION[{}][{}]-------------------------------------------", ++count, interruptAtEventClass.getSimpleName()); log.info("CALCULATED TAR FILE [{}]calculated-exists[{}]", tarFilePath, Files.exists(tarFilePath)); log.info("-----------------------------------------------------"); - scheduler.setChecker(new TaskInterrupter.Checker(event -> interruptAtEventClass.getName().equals(event.getClass().getName()), interruptAtEventClass.getSimpleName())); + scheduler.setChecker(new TaskInterrupter.Checker(interruptAtEventClass::isInstance, interruptAtEventClass.getSimpleName())); Event lastEvent = nextLastEvent; retrieveMessage = mapper.writeValueAsString(retrieve); log.info("retrieveMessage {}", retrieveMessage); diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/ChunkFileComparator.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/ChunkFileComparator.java new file mode 100644 index 000000000..9b3913692 --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/ChunkFileComparator.java @@ -0,0 +1,28 @@ +package org.datavaultplatform.worker.tasks; + +import java.io.File; +import java.util.Comparator; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * A comparator for the Delete task - sorts chunk tar files by their numeric chunk suffix, so deletes are processed in chunk-number order. + * @see org.datavaultplatform.worker.tasks.Delete + */ +public class ChunkFileComparator implements Comparator { + + static final Pattern PATTERN = Pattern.compile(".*\\.tar\\.(\\d+)"); + + @Override + public int compare(File file1, File file2) { + Matcher m1 = PATTERN.matcher(file1.getName()); + Matcher m2 = PATTERN.matcher(file2.getName()); + if (m1.matches() && m2.matches()) { + int num1 = m1.group(1) == null ? 0 : Integer.parseInt(m1.group(1)); + int num2 = m2.group(1) == null ?
0 : Integer.parseInt(m2.group(1)); + return Integer.compare(num1, num2); + } else { + return file1.getName().compareTo(file2.getName()); + } + } +} diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/ChunkFileComparatorTest.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/ChunkFileComparatorTest.java new file mode 100644 index 000000000..cf8a4bc39 --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/ChunkFileComparatorTest.java @@ -0,0 +1,57 @@ +package org.datavaultplatform.worker.tasks; + +import org.junit.jupiter.api.Test; + +import java.io.File; +import java.util.*; +import java.util.regex.Matcher; + +import static org.assertj.core.api.Assertions.assertThat; + + +class ChunkFileComparatorTest { + + final Comparator chunkFileComparator = new ChunkFileComparator(); + + @Test + void testPatternWithChunkNumber(){ + String example = "/private/tmp/blah/temp/worker-store/d87ca007-9cee-4c49-8169-f74c2b90b773.tar.1234"; + Matcher m = ChunkFileComparator.PATTERN.matcher(example); + assertThat(m.matches()).isTrue(); + assertThat(m.group(1)).isEqualTo("1234"); + } + + @Test + void testPatternWithoutChunkNumber(){ + String example = "/private/tmp/blah/temp/worker-store/d87ca007-9cee-4c49-8169-f74c2b90b773.tar"; + Matcher m = ChunkFileComparator.PATTERN.matcher(example); + assertThat(m.matches()).isFalse(); + } + + @Test + void testRegexAndSort() { + + List items = new ArrayList<>(); + for (int i = 0; i < 20; i++) { + int num = i + 1; + items.add(new File("/private/tmp/blah/temp/worker-store/d87ca007-9cee-4c49-8169-f74c2b90b773.tar." + num)); + } + Collections.shuffle(items); + + + File[] shuffled = items.toArray(new File[0]); + for (int i = 0; i < shuffled.length; i++) { + int num = i + 1; + File file = shuffled[i]; + System.out.printf("before %4d-%s%n", num, file.getName()); + } + + File[] sortedFiles = Arrays.stream(shuffled).sorted(chunkFileComparator).toArray(File[]::new); + for (int i = 0; i < sortedFiles.length; i++) { + int num = i + 1; + File file = sortedFiles[i]; + assertThat(file.getName()).endsWith(".tar." 
+ num); + System.out.printf("after %4d-%s%n", num, file.getName()); + } + } +} \ No newline at end of file diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/DeleteTest.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/DeleteTest.java new file mode 100644 index 000000000..e54dcfdfa --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/DeleteTest.java @@ -0,0 +1,995 @@ +package org.datavaultplatform.worker.tasks; + +import ch.qos.logback.classic.spi.ILoggingEvent; +import ch.qos.logback.core.read.ListAppender; +import com.fasterxml.jackson.databind.ObjectMapper; +import lombok.SneakyThrows; +import org.datavaultplatform.common.PropNames; +import org.datavaultplatform.common.event.Error; +import org.datavaultplatform.common.event.Event; +import org.datavaultplatform.common.event.EventSender; +import org.datavaultplatform.common.event.delete.DeleteComplete; +import org.datavaultplatform.common.event.delete.DeletedChunk; +import org.datavaultplatform.common.io.Progress; +import org.datavaultplatform.common.model.ArchiveStore; +import org.datavaultplatform.common.model.Job; +import org.datavaultplatform.common.storage.impl.LocalFileSystem; +import org.datavaultplatform.common.storage.impl.MultiLocalFileSystem; +import org.datavaultplatform.common.task.Context; +import org.datavaultplatform.common.task.Task; +import org.datavaultplatform.common.util.StorageClassNameResolver; +import org.junit.jupiter.api.*; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.mockito.junit.jupiter.MockitoSettings; +import org.mockito.quality.Strictness; +import org.mockito.stubbing.Answer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.util.Assert; + +import java.io.File; +import java.io.FileNotFoundException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.ThreadLocalRandom; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.*; + +@ExtendWith(MockitoExtension.class) +@MockitoSettings(strictness = Strictness.LENIENT) +@Execution(ExecutionMode.SAME_THREAD) +@TestClassOrder(ClassOrderer.OrderAnnotation.class) // Necessary! 
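+// the nested test classes share the static deletedFiles / deletedFilesByLocation collections, so they must run one at a time and in a fixed order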
+class DeleteTest { + + private static final Logger LOGGER = LoggerFactory.getLogger(DeleteTest.class); + + private static final String ARCHIVE_STORE_ID = "TEST-ARCHIVE-STORE-ID"; + private static final String ARCHIVE_ID = "TEST-ARCHIVE-ID"; + private static final String JOB_ID = "TEST-JOB-ID"; + private static final String USER_ID = "TEST-USER-ID"; + private static final String DEPOSIT_ID = "TEST-DEPOSIT-ID"; + private static final String BAG_ID = "TEST-BAG-ID"; + private static final String ERROR_CHUNK_NUMBER = "ERROR-CHUNK-NUMBER"; + private static final String LOCATION_ONE = "location-one"; + private static final String LOCATION_TWO = "location-two"; + + // CopyOnWriteArrayList allows concurrent tasks to update it in parallel + // has to be static because it's referenced by static class instances + private static final List deletedFiles = new CopyOnWriteArrayList<>(); + + // has to be static because it's referenced by static class instances + private static final Map> deletedFilesByLocation = new ConcurrentHashMap<>(); + + final Path tempBase = Path.of("/tmp/delete"); + final Path archiveStoreRoot = tempBase.resolve("archiveStoreRoot"); + + @Mock + EventSender mEventSender; + @Mock + Context mContext; + @Mock + StorageClassNameResolver mStorageClassResolver; + @Mock + Job mJob; + + org.datavaultplatform.common.model.ArchiveStore archiveStoreSuccess; + org.datavaultplatform.common.model.ArchiveStore archiveStoreFailure; + List nonDeletedChunks; + List deletedChunkEvents; + Date startTimestamp; + + @AfterEach + void tearDown() { + deletedFiles.clear(); + deletedFilesByLocation.clear(); + } + + @BeforeEach + @SneakyThrows + void setup() { + Files.createDirectories(archiveStoreRoot); + + this.startTimestamp = new Date(); + this.nonDeletedChunks = new CopyOnWriteArrayList<>(); + this.deletedChunkEvents = new CopyOnWriteArrayList<>(); + deletedFiles.clear(); + deletedFilesByLocation.clear(); + + this.archiveStoreSuccess = createArchiveStore("TEST ARCHIVE STORE SUCCESS"); + this.archiveStoreFailure = createArchiveStore("TEST ARCHIVE STORE FAILURE"); + + Path tempDir = tempBase.resolve("tempDir"); + Path metaDir = tempBase.resolve("metaDir"); + when(mContext.getTempDir()).thenReturn(tempDir); + when(mContext.getMetaDir()).thenReturn(metaDir); + when(mContext.isChunkingEnabled()).thenReturn(true); + when(mContext.getNoChunkThreads()).thenReturn(Runtime.getRuntime().availableProcessors()); + when(mContext.getStorageClassNameResolver()).thenReturn(mStorageClassResolver); + when(mJob.getID()).thenReturn(JOB_ID); + + when(mContext.getEventSender()).thenReturn(mEventSender); + doAnswer(invocationOnMock -> { + // when the mock eventSender is sent an event - we put it into deletedChunkEvents or nonDeletedChunks + Event event = invocationOnMock.getArgument(0); + if (event instanceof DeletedChunk dc) { + deletedChunkEvents.add(dc); + } else { + nonDeletedChunks.add(event); + } + return null; + }).when(mEventSender).send(any(Event.class)); + + // a no-op - will return the value passed in (without modification) + doAnswer((Answer) invocation -> + invocation.getArgument(0)).when(mStorageClassResolver).resolveStorageClassName(any(String.class)); + } + + private void checkNoErrors() { + assertThat(nonDeletedChunks).noneMatch(Error.class::isInstance); + } + + private Error findDeleteError() { + List errors = nonDeletedChunks.stream() + .filter(Error.class::isInstance) + .map(Error.class::cast) + .toList(); + // we are only expecting 1 error + assertThat(errors).hasSize(1); + // when we look for an error - we
can also check that no DeleteComplete was sent + assertThat(nonDeletedChunks).noneMatch(DeleteComplete.class::isInstance); + return errors.get(0); + } + + private Task getTask(org.datavaultplatform.common.model.ArchiveStore archiveStore, Integer numberOfChunks, boolean sendDeletedChunks) { + Map properties = new HashMap<>(); + properties.put(ARCHIVE_STORE_ID, ARCHIVE_ID); + if (numberOfChunks != null) { + properties.put(PropNames.NUM_OF_CHUNKS, String.valueOf(numberOfChunks)); + } + properties.put(PropNames.ARCHIVE_SIZE, "10000"); + properties.put(PropNames.DEPOSIT_ID, DEPOSIT_ID); + properties.put(PropNames.BAG_ID, BAG_ID); + properties.put(PropNames.USER_ID, USER_ID); + // we only want to send WORKERS_SEND_DELETED_CHUNK_EVENTS if true - to test that a missing value defaults to "false" + if (sendDeletedChunks) { + properties.put(PropNames.WORKERS_SEND_DELETED_CHUNK_EVENTS, "true"); + } + List archiveStores = List.of(archiveStore); + Map> userFileStoreProperties = Collections.emptyMap(); + Map userFileStoreClasses = Collections.emptyMap(); + List fileStorePaths = Collections.emptyList(); + List fileUploadPaths = Collections.emptyList(); + Map chunkFilesDigest = Collections.emptyMap(); + byte[] tarIV = new byte[0]; + Map chunksIVs = Collections.emptyMap(); + String encTarDigest = null; + Map encChunksDigest = Collections.emptyMap(); + Event lastEvent = null; + //noinspection ConstantValue + return new Task(mJob, + properties, + archiveStores, + userFileStoreProperties, + userFileStoreClasses, + fileStorePaths, + fileUploadPaths, + chunkFilesDigest, + tarIV, + chunksIVs, + encTarDigest, + encChunksDigest, + lastEvent); + } + + @SneakyThrows + private Delete getDelete(Task commonTask) { + ObjectMapper mapper = new ObjectMapper(); + String message = mapper.writerWithDefaultPrettyPrinter().writeValueAsString(commonTask); + Class clazz = Class.forName(commonTask.getTaskClass()).asSubclass(Task.class); + assertThat(Delete.class).isAssignableFrom(clazz); + LOGGER.info(message); + return mapper.readValue(message, Delete.class); + } + + /** + * we want to check that the DeletedChunk events sent by the generic Delete Task match the deleted files recorded by the MultiLocations archive store implementations + * + * @param expectedNumberOfChunksDeleted - the expected number of DeletedChunk events + * @param deletedChunksByLocation the Map of location to List of DeletedChunk events + */ + private void pairUpDeletedChunksAndDeletedFilesWithLocation(int expectedNumberOfChunksDeleted, Map> deletedChunksByLocation) { + long totalDeletedChunks = deletedChunksByLocation.values().stream().mapToInt(List::size).sum(); + assertThat(totalDeletedChunks).isEqualTo(expectedNumberOfChunksDeleted); + assertThat(deletedChunksByLocation).hasSameSizeAs(deletedFilesByLocation); + + deletedChunksByLocation.forEach((location, deletedChunksAtLocation) -> { + List deletedFilesAtLocation = deletedFilesByLocation.get(location); + assertThat(deletedChunksAtLocation).hasSameSizeAs(deletedFilesAtLocation); + deletedChunksAtLocation.forEach(dc -> { + int chunkNumber = dc.getChunkNumber(); + Stream chunkFilesAtLocationStream = deletedFilesAtLocation.stream(). + filter(file -> file.getAbsolutePath().contains("/tmp/delete/tempDir/TEST-BAG-ID.tar")); + if (chunkNumber > 0) { + chunkFilesAtLocationStream = chunkFilesAtLocationStream.filter(file -> file.getName().endsWith("."
+ dc.getChunkNumber())); + } + List chunkFilesAtLocation = chunkFilesAtLocationStream.toList(); + assertThat(chunkFilesAtLocation).hasSize(1); + }); + }); + } + + /** + * When we are not using locations... + * we want to check that the DeletedChunk events sent by the generic Delete Task match the deleted files recorded by the ArchiveStore implementations + * + * @param expectedNumberOfDeletedChunks the expected number of DeletedChunk events + * @param deletedChunks the list of DeletedChunk events + */ + private void pairUpDeletedChunksAndDeletedFilesNoLocation(int expectedNumberOfDeletedChunks, List deletedChunks) { + assertThat(deletedChunks).hasSize(expectedNumberOfDeletedChunks); + assertThat(deletedChunks).hasSameSizeAs(deletedFiles); + + deletedChunks.forEach(dc -> { + int chunkNumber = dc.getChunkNumber(); + Stream chunkFilesStream = deletedFiles.stream(). + filter(file -> file.getAbsolutePath().contains("/tmp/delete/tempDir/TEST-BAG-ID.tar")); + if (chunkNumber > 0) { + chunkFilesStream = chunkFilesStream.filter(file -> file.getName().endsWith("." + dc.getChunkNumber())); + } + List chunkFilesAt = chunkFilesStream.toList(); + assertThat(chunkFilesAt).hasSize(1); + }); + } + + private void checkDeletedChunk(DeletedChunk dc, String location, int chunkNumber, String message) { + checkEvent(DeletedChunk.class, dc, location, chunkNumber, message); + } + + private void checkError(Error error, String location, int chunkNumber, String message) { + checkEvent(Error.class, error, location, chunkNumber, message); + } + + private void checkEvent(Class expectedEventClass, Event event, String location, int chunkNumber, String message) { + assertThat(event.getEventClass()).isEqualTo(expectedEventClass.getCanonicalName()); + assertThat(event.getChunkNumber()).isEqualTo(chunkNumber); + assertThat(event.getLocation()).isEqualTo(location); + assertThat(event.getMessage()).isEqualTo(message); + + assertThat(event.getTimestamp()).isAfterOrEqualTo(startTimestamp); + assertThat(event.getDepositId()).isEqualTo(DEPOSIT_ID); + assertThat(event.getJobId()).isEqualTo(JOB_ID); + assertThat(event.getUserId()).isEqualTo(USER_ID); + assertThat(event.getArchiveStoreId()).isEqualTo(ARCHIVE_STORE_ID); + assertThat(event.getArchiveId()).isEqualTo(ARCHIVE_ID); + } + + private void performDeleteSuccess(Delete delete) { + try { + delete.performAction(mContext); + } catch (RuntimeException rte) { + Assertions.fail("unexpected exception", rte); + } + } + + private void performDeleteAndCheckForDeleteFileException(Delete delete, String expectedDteMessage) { + performDeleteAndCheckForDeleteFileException(delete, expectedDteMessage, 0); + } + + private void performDeleteAndCheckForDeleteFileException(Delete delete, String expectedDteMessage, int errorChunkNumber, String location) { + try { + delete.performAction(mContext); + Assertions.fail("expected the Delete Task to fail!"); + } catch (RuntimeException rte) { + if (rte.getCause() instanceof DeleteFileException dte) { + assertThat(dte.getLocation()).isEqualTo(location); + assertThat(dte.getArchiveStoreId()).isEqualTo(ARCHIVE_STORE_ID); + assertThat(dte.getChunkNumber()).isEqualTo(errorChunkNumber); + assertThat(dte.getMessage()).isEqualTo(expectedDteMessage); + } else { + Assertions.fail("expected DeleteFileException!"); + } + } + } + + private void performDeleteAndCheckForDeleteFileException(Delete delete, String expectedDteMessage, int errorChunkNumber) { + performDeleteAndCheckForDeleteFileException(delete, expectedDteMessage, errorChunkNumber, Delete.NO_LOCATION); + }
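+ + // groups the DeletedChunk events by location, each group sorted by chunk number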
+ private Map> getDeletedChunksByLocation() { + return deletedChunkEvents.stream(). + sorted(Comparator.comparing(DeletedChunk::getChunkNumber)). + collect(Collectors.groupingBy(DeletedChunk::getLocation)); + } + + private int getErrorChunkNumber(int numberOfChunks) { + int errorChunkNumber = ThreadLocalRandom.current().nextInt(1, numberOfChunks + 1); + assertThat(errorChunkNumber) + .isGreaterThanOrEqualTo(1) + .isLessThanOrEqualTo(numberOfChunks); + return errorChunkNumber; + } + + private org.datavaultplatform.common.model.ArchiveStore createArchiveStore(String label) { + // usually the ArchiveStore ID is injected by hibernate/JPA - you can't normally set it directly + var result = new org.datavaultplatform.common.model.ArchiveStore() { + public String getId() { + return ARCHIVE_STORE_ID; + } + }; + HashMap archiveStoreProps = new HashMap<>(); + archiveStoreProps.put(PropNames.ROOT_PATH, archiveStoreRoot.toAbsolutePath().toString()); + result.setProperties(archiveStoreProps); + result.setLabel(label); + return result; + } + + private Delete createDelete(ArchiveStore archiveStore, int numberOfChunks) { + Task task = getTask(archiveStore, numberOfChunks, true); + task.setTaskClass(Delete.class.getName()); + return getDelete(task); + } + + private Delete createDeleteNoDeletedChunks(ArchiveStore archiveStore, int numberOfChunks) { + Task task = getTask(archiveStore, numberOfChunks, false); + task.setTaskClass(Delete.class.getName()); + return getDelete(task); + } + + @Order(1) + @Nested + @TestMethodOrder(MethodOrderer.OrderAnnotation.class) + class NotMultipleLocationsTests { + + @BeforeEach + void setup() { + archiveStoreSuccess.setStorageClass(ArchiveStoreSuccessImpl.class.getName()); + archiveStoreFailure.setStorageClass(ArchiveStoreFailureImpl.class.getName()); + } + + @Order(1) + @Test + void testSuccessWithNoChunks() { + when(mContext.isChunkingEnabled()).thenReturn(false); + + Delete delete = createDelete(archiveStoreSuccess, 0); + performDeleteSuccess(delete); + + // check non-DeletedChunk events + assertThat(nonDeletedChunks).hasSize(4); + + File chunkFile = new File("/tmp/delete/tempDir/TEST-BAG-ID.tar"); + + //check deletedFiles + assertThat(deletedFiles) + .hasSize(1) + .contains(chunkFile); + + // check each deleted chunk + DeletedChunk dc = deletedChunkEvents.get(0); + checkDeletedChunk(dc, Delete.NO_LOCATION, 0, "Deleted Chunk [0/0] from (ArchiveStoreSuccessImpl/TEST-ARCHIVE-STORE-ID/no-location)"); + + verify(mContext, times(1)).isChunkingEnabled(); + verify(mEventSender, times(4 + 1)).send(any(Event.class)); + pairUpDeletedChunksAndDeletedFilesNoLocation(1, deletedChunkEvents); + } + + @Order(2) + @SneakyThrows + @ParameterizedTest + @ValueSource(ints = {1, 10, 50, 100, 1000}) + void testSuccessWithChunks(int numberOfChunks) { + assertThat(numberOfChunks).isGreaterThan(0); + Delete delete = createDelete(archiveStoreSuccess, numberOfChunks); + performDeleteSuccess(delete); + + String template = "/tmp/delete/tempDir/TEST-BAG-ID.tar.%d"; + + // we are using deletedFiles - not deletedFilesByLocation + assertThat(deletedFiles).hasSize(numberOfChunks); + assertThat(deletedFilesByLocation).isEmpty(); + + for (int i = 0; i < numberOfChunks; i++) { + int chunkNumber = i + 1; + File chunkFile = new File(template.formatted(chunkNumber)); + assertThat(deletedFiles).contains(chunkFile); + } + + // eventSender + verify(mEventSender, times(4 + numberOfChunks)).send(any(Event.class)); + + // non DeletedChunk events +
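// (expected to be InitStates, DeleteStart, UpdateProgress and DeleteComplete) +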
assertThat(nonDeletedChunks).hasSize(4); + + assertThat(deletedChunkEvents).hasSize(numberOfChunks); + deletedChunkEvents.sort(Comparator.comparingInt(DeletedChunk::getChunkNumber)); + for (int i = 0; i < numberOfChunks; i++) { + int chunkNumber = i + 1; + DeletedChunk dc = deletedChunkEvents.get(i); + String expectedMessage = "Deleted Chunk [%s/%s] from (ArchiveStoreSuccessImpl/TEST-ARCHIVE-STORE-ID/no-location)".formatted(chunkNumber, numberOfChunks); + checkDeletedChunk(dc, Delete.NO_LOCATION, chunkNumber, expectedMessage); + } + verify(mContext).isChunkingEnabled(); + checkNoErrors(); + pairUpDeletedChunksAndDeletedFilesNoLocation(numberOfChunks, deletedChunkEvents); + } + + + @Order(3) + @Test + @SneakyThrows + void testFailureNoChunks() { + when(mContext.isChunkingEnabled()).thenReturn(false); + + Delete delete = createDelete(archiveStoreFailure, 0); + String expectedDteMessage = "ArchiveStore[ArchiveStoreFailureImpl/TEST-ARCHIVE-STORE-ID]Location[no-location]ChunkNum[0]Cause[java.lang.RuntimeException/oops@no-chunks]"; + performDeleteAndCheckForDeleteFileException(delete, expectedDteMessage); + + // non DeletedChunk events + assertThat(nonDeletedChunks).hasSize(4); + + // the single delete chunk that was attempted failed + assertThat(deletedFiles).isEmpty(); + + // deleted chunks + assertThat(deletedChunkEvents).isEmpty(); + + // verify + verify(mEventSender, atLeast(4)).send(any(Event.class)); + verify(mContext).isChunkingEnabled(); + + Error error = findDeleteError(); + checkError(error, Delete.NO_LOCATION, 0, "Deposit delete failed: " + expectedDteMessage); + + pairUpDeletedChunksAndDeletedFilesNoLocation(0, deletedChunkEvents); + } + + @Order(4) + @ParameterizedTest + @ValueSource(ints = {1, 10, 50, 100, 1000}) + @SneakyThrows + void testFailureWithChunks(int numberOfChunks) { + assertThat(numberOfChunks).isGreaterThan(0); + // put the ERROR_CHUNK_NUMBER into the 'fake archive' so it knows when to throw an error + int errorChunkNumber = getErrorChunkNumber(numberOfChunks); + archiveStoreFailure.getProperties().put(ERROR_CHUNK_NUMBER, String.valueOf(errorChunkNumber)); + + Delete delete = createDelete(archiveStoreFailure, numberOfChunks); + String expectedDteMessage = "ArchiveStore[ArchiveStoreFailureImpl/TEST-ARCHIVE-STORE-ID]Location[no-location]ChunkNum[%d]Cause[java.lang.RuntimeException/oops@%d]".formatted(errorChunkNumber, errorChunkNumber); + performDeleteAndCheckForDeleteFileException(delete, expectedDteMessage, errorChunkNumber); + + // non DeletedChunk events + assertThat(nonDeletedChunks).hasSize(4); + + int expectedDeletedChunks = numberOfChunks - 1; + assertThat(deletedChunkEvents).hasSize(expectedDeletedChunks); + + // we are using deletedFiles not deletedFilesByLocation + assertThat(deletedFiles).hasSize(expectedDeletedChunks); + + deletedChunkEvents.sort(Comparator.comparingInt(DeletedChunk::getChunkNumber)); + + // 1 .. errorChunkNumber-1 + for (int i = 0; i < errorChunkNumber - 1; i++) { + DeletedChunk dc = deletedChunkEvents.get(i); + int chunkNumber = i + 1; + String expectedMessage = "Deleted Chunk [%s/%s] from (ArchiveStoreFailureImpl/TEST-ARCHIVE-STORE-ID/no-location)".formatted(chunkNumber, numberOfChunks); + checkDeletedChunk(dc, Delete.NO_LOCATION, chunkNumber, expectedMessage); + } + + // errorChunkNumber+1 .. numberOfChunks
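+ // (the sorted event list skips the failed chunk, so index i maps to chunkNumber i + 2 here)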
+ for (int i = errorChunkNumber - 1; i < numberOfChunks - 1; i++) { + DeletedChunk dc = deletedChunkEvents.get(i); + int chunkNumber = i + 2; + String expectedMessage = "Deleted Chunk [%s/%s] from (ArchiveStoreFailureImpl/TEST-ARCHIVE-STORE-ID/no-location)".formatted(chunkNumber, numberOfChunks); + checkDeletedChunk(dc, Delete.NO_LOCATION, chunkNumber, expectedMessage); + } + + // check that there's no DeletedChunk for errorChunkNumber + Optional optionalDeletedChunk = deletedChunkEvents.stream() + .filter(dc -> dc.getChunkNumber().equals(errorChunkNumber)) + .findFirst(); + assertThat(optionalDeletedChunk).isEmpty(); + + verify(mContext).isChunkingEnabled(); + verify(mEventSender, times(4 + expectedDeletedChunks)).send(any(Event.class)); + + Error error = findDeleteError(); + checkError(error, Delete.NO_LOCATION, errorChunkNumber, "Deposit delete failed: " + expectedDteMessage); + + pairUpDeletedChunksAndDeletedFilesNoLocation(numberOfChunks - 1, deletedChunkEvents); + } + + @Order(5) + @ParameterizedTest + @ValueSource(ints = {1, 10, 50, 100, 1000}) + @SneakyThrows + void testFailureWithChunksButNoDeletedChunkEventsSent(int numberOfChunks) { + assertThat(numberOfChunks).isGreaterThan(0); + // put the ERROR_CHUNK_NUMBER into the 'fake archive' so it knows when to throw an error + int errorChunkNumber = getErrorChunkNumber(numberOfChunks); + archiveStoreFailure.getProperties().put(ERROR_CHUNK_NUMBER, String.valueOf(errorChunkNumber)); + + ch.qos.logback.classic.Logger deleteLogger = (ch.qos.logback.classic.Logger)LoggerFactory.getLogger(Delete.class); + ListAppender listAppender = new ListAppender<>(); + deleteLogger.addAppender(listAppender); + listAppender.start(); + + Delete delete = createDeleteNoDeletedChunks(archiveStoreFailure, numberOfChunks); + String expectedDteMessage = "ArchiveStore[ArchiveStoreFailureImpl/TEST-ARCHIVE-STORE-ID]Location[no-location]ChunkNum[%d]Cause[java.lang.RuntimeException/oops@%d]".formatted(errorChunkNumber, errorChunkNumber); + performDeleteAndCheckForDeleteFileException(delete, expectedDteMessage, errorChunkNumber); + + // non DeletedChunk events + assertThat(nonDeletedChunks).hasSize(4); + + int expectedDeletedChunks = numberOfChunks - 1; + assertThat(deletedChunkEvents).isEmpty(); + + // we are using deletedFiles not deletedFilesByLocation + assertThat(deletedFiles).hasSize(expectedDeletedChunks); + + verify(mContext).isChunkingEnabled(); + verify(mEventSender, times(4)).send(any(Event.class)); + + Error error = findDeleteError(); + checkError(error, Delete.NO_LOCATION, errorChunkNumber, "Deposit delete failed: " + expectedDteMessage); + + listAppender.stop(); + deleteLogger.detachAppender(listAppender); + List notSendingMessages = listAppender.list.stream() + .map(ILoggingEvent::getFormattedMessage) + .filter(m -> m.startsWith("NOT SENDING")) + .toList(); + assertThat(notSendingMessages).hasSize(expectedDeletedChunks); + String prefixTemplate = "NOT SENDING Deleted Chunk [%d/%d]"; + for (int i = 1; i <= numberOfChunks; i++) { + if (i != errorChunkNumber) { + String prefix = prefixTemplate.formatted(i, numberOfChunks); + assertThat(notSendingMessages.stream().anyMatch(m -> m.startsWith(prefix))).isTrue(); + } + } + String prefix = prefixTemplate.formatted(errorChunkNumber, numberOfChunks); + assertThat(notSendingMessages.stream().noneMatch(m -> m.startsWith(prefix))).isTrue(); + } + + /** + * Subclasses LocalFileSystem to make sure we get the same class/interface hierarchy with ArchiveStore/Device
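+ * The override just records each deleted file in the static deletedFiles list instead of deleting anything for real.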
+ * We only need to override delete. + */ + public static class ArchiveStoreSuccessImpl extends LocalFileSystem { + public ArchiveStoreSuccessImpl(String name, Map config) throws FileNotFoundException { + super(name, config); + } + + @Override + public void delete(String path, File working, Progress progress) { + deletedFiles.add(working); + } + } + + /** + * Subclasses LocalFileSystem to make sure we get the same class/interface hierarchy with ArchiveStore/Device + * We only need to override delete. + * It is set up to throw an Exception when it encounters the chunk numbered : ERROR_CHUNK_NUMBER + * This is hard enough to arrange in a Unit Test - and much harder within an Integration Test + */ + public static class ArchiveStoreFailureImpl extends ArchiveStoreSuccessImpl { + final Pattern regex = Pattern.compile("^(.*)(\\.)(\\d+)$"); + private final Long errorChunkNumber; + + public ArchiveStoreFailureImpl(String name, Map config) throws FileNotFoundException { + super(name, config); + if (config.containsKey(ERROR_CHUNK_NUMBER)) { + this.errorChunkNumber = Long.valueOf(config.get(ERROR_CHUNK_NUMBER)); + } else { + this.errorChunkNumber = null; + } + } + + @Override + public void delete(String path, File working, Progress progress) { + if (errorChunkNumber == null) { + throw new RuntimeException("oops@no-chunks"); + } + Matcher matcher = regex.matcher(path); + Assert.isTrue(matcher.matches(), "failed to find chunkNumber at end of filename!"); + Long chunkNumber = Long.valueOf(matcher.group(3)); + if (errorChunkNumber.equals(chunkNumber)) { + throw new RuntimeException("oops@" + chunkNumber); + } else { + deletedFiles.add(working); + } + } + } + + } + + @Nested + @Order(2) + @TestMethodOrder(MethodOrderer.OrderAnnotation.class) + class MultipleLocationsTests { + + String location1; + String location2; + + @BeforeEach + @SneakyThrows + void setup() { + archiveStoreSuccess.setStorageClass(MultiLocationsArchiveStoreSuccessImpl.class.getName()); + archiveStoreFailure.setStorageClass(MultiLocationsArchiveStoreFailureImpl.class.getName()); + + Path location1path = tempBase.resolve(LOCATION_ONE); + Path location2path = tempBase.resolve(LOCATION_TWO); + Files.createDirectories(location1path); + Files.createDirectories(location2path); + location1 = location1path.toFile().getCanonicalPath(); + location2 = location2path.toFile().getCanonicalPath(); + String multiLocationRootPath = location1 + "," + location2; + archiveStoreSuccess.getProperties().put(PropNames.ROOT_PATH, multiLocationRootPath); + archiveStoreFailure.getProperties().put(PropNames.ROOT_PATH, multiLocationRootPath); + } + + + @Order(1) + @Test + void testSuccessWithNoChunks() { + when(mContext.isChunkingEnabled()).thenReturn(false); + + Delete delete = createDelete(archiveStoreSuccess, 0); + performDeleteSuccess(delete); + + // check non-DeletedChunk events + assertThat(nonDeletedChunks).hasSize(4); + + File chunkFile = new File("/tmp/delete/tempDir/TEST-BAG-ID.tar"); + + //check deletedFiles by location + assertThat(deletedFilesByLocation).containsOnlyKeys(location1, location2); + //noinspection CodeBlock2Expr + deletedFilesByLocation.forEach((location, locationDeletedFiles) -> { + assertThat(locationDeletedFiles) + .hasSize(1) + .contains(chunkFile); + }); + + // check each deleted chunk + Map> deletedChunksByLocation = getDeletedChunksByLocation(); + assertThat(deletedChunksByLocation.keySet()).containsExactlyInAnyOrder(location1, location2);
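+ // each location deletes its own copy of the single (un-chunked) tar file, so there is one DeletedChunk per location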
+ //noinspection CodeBlock2Expr + deletedChunksByLocation.forEach((location, deletedChunksAtLocation) -> { + deletedChunksAtLocation.forEach(dc -> { + String expectedMessage = "Deleted Chunk [0/0] from (MultiLocationsArchiveStoreSuccessImpl/TEST-ARCHIVE-STORE-ID/%s)".formatted(location); + checkDeletedChunk(dc, location, 0, expectedMessage); + }); + }); + + verify(mContext, times(2)).isChunkingEnabled(); + verify(mEventSender, times(4 + 2)).send(any(Event.class)); + pairUpDeletedChunksAndDeletedFilesWithLocation(2, deletedChunksByLocation); + } + + @Order(2) + @SneakyThrows + @ParameterizedTest + @ValueSource(ints = {1, 10, 50, 100, 1000}) + void testSuccessWithChunks(int numberOfChunks) { + assertThat(numberOfChunks).isGreaterThan(0); + int numLocations = 2; + Delete delete = createDelete(archiveStoreSuccess, numberOfChunks); + performDeleteSuccess(delete); + + String template = "/tmp/delete/tempDir/TEST-BAG-ID.tar.%d"; + + // we are using deletedFilesByLocation - not deletedFiles + assertThat(deletedFiles).isEmpty(); + assertThat(deletedFilesByLocation).containsOnlyKeys(location1, location2); + + deletedFilesByLocation.forEach((location, deletedLocationFiles) -> { + assertThat(deletedLocationFiles).hasSize(numberOfChunks); + for (int i = 0; i < numberOfChunks; i++) { + int chunkNumber = i + 1; + File chunkFile = new File(template.formatted(chunkNumber)); + assertThat(deletedLocationFiles).contains(chunkFile); + } + }); + + // eventSender + verify(mEventSender, times(4 + (numberOfChunks * numLocations))).send(any(Event.class)); + + // non DeletedChunk events + assertThat(nonDeletedChunks).hasSize(4); + + Map> deletedChunksByLocation = getDeletedChunksByLocation(); + assertThat(deletedChunksByLocation).containsOnlyKeys(location1, location2); + //noinspection CodeBlock2Expr + deletedChunksByLocation.forEach((location, deletedLocationChunks) -> { + deletedLocationChunks.forEach(dc -> { + int cn = dc.getChunkNumber(); + String expectedMessage = "Deleted Chunk [%s/%s] from (MultiLocationsArchiveStoreSuccessImpl/TEST-ARCHIVE-STORE-ID/%s)".formatted(cn, numberOfChunks, location); + checkDeletedChunk(dc, location, cn, expectedMessage); + }); + }); + verify(mContext, times(numLocations)).isChunkingEnabled(); + checkNoErrors(); + pairUpDeletedChunksAndDeletedFilesWithLocation(numberOfChunks * 2, deletedChunksByLocation); + } + + @Order(3) + @Test + @SneakyThrows + void testFailureNoChunks() { + when(mContext.isChunkingEnabled()).thenReturn(false); + + Delete delete = createDelete(archiveStoreFailure, 0); + String expectedDteMessage = "ArchiveStore[MultiLocationsArchiveStoreFailureImpl/TEST-ARCHIVE-STORE-ID]Location[%s]ChunkNum[0]Cause[java.lang.RuntimeException/oops@no-chunks/%s]".formatted(location2, location2); + performDeleteAndCheckForDeleteFileException(delete, expectedDteMessage, 0, location2); + + // non DeletedChunk events + assertThat(nonDeletedChunks).hasSize(4); + + // the single delete chunk that was attempted failed for location2 + assertThat(deletedFilesByLocation).hasSize(1); + File noChunkFile = new File("/tmp/delete/tempDir/TEST-BAG-ID.tar"); + assertThat(deletedFilesByLocation.get(location1)).contains(noChunkFile); + + // deleted chunks + assertThat(deletedChunkEvents).hasSize(1); + DeletedChunk dc1 = deletedChunkEvents.get(0); + checkDeletedChunk(dc1, location1, 0, "Deleted Chunk [0/0] from (MultiLocationsArchiveStoreFailureImpl/TEST-ARCHIVE-STORE-ID/%s)".formatted(location1)); + + // verify + verify(mEventSender, times(5)).send(any(Event.class)); +
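// 5 = the 4 non-DeletedChunk events plus the single DeletedChunk from location-one +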
verify(mContext, times(2)).isChunkingEnabled();//once per location + + Error error = findDeleteError(); + checkError(error, location2, 0, "Deposit delete failed: " + expectedDteMessage); + + Map> deletedChunksByLocation = getDeletedChunksByLocation(); + pairUpDeletedChunksAndDeletedFilesWithLocation(1, deletedChunksByLocation); + } + + /** + * This test is sensitive to the order in which tasks are submitted to the TaskExecutor. + * For the Delete Task, tasks are submitted in 2 nested loops - the outer loop over locations, the inner loop over chunk numbers. + * The TaskExecutor waits for tasks to finish in submission order - location then chunkNumber order. + * So when MultiLocationsArchiveStoreFailureImpl throws an error processing 'location 2 and errorChunkNumber', + * some tasks will already have finished; after the error is detected, tasks not yet checked for completion have 5 minutes to complete before being terminated. + */ + @Order(4) + @ParameterizedTest + @ValueSource(ints = {1, 10, 50, 100, 1000}) + @SneakyThrows + void testFailureWithChunks(int numberOfChunks) { + assertThat(numberOfChunks).isGreaterThan(0); + // put the ERROR_CHUNK_NUMBER into the 'fake archive' so it knows when to throw an error + int errorChunkNumber = getErrorChunkNumber(numberOfChunks); + archiveStoreFailure.getProperties().put(ERROR_CHUNK_NUMBER, String.valueOf(errorChunkNumber)); + + Delete delete = createDelete(archiveStoreFailure, numberOfChunks); + String expectedDteMessage = "ArchiveStore[MultiLocationsArchiveStoreFailureImpl/TEST-ARCHIVE-STORE-ID]Location[%s]ChunkNum[%d]Cause[java.lang.RuntimeException/oops@%d/%s]".formatted(location2, errorChunkNumber, errorChunkNumber, location2); + performDeleteAndCheckForDeleteFileException(delete, expectedDteMessage, errorChunkNumber, location2); + + // non DeletedChunk events + assertThat(nonDeletedChunks).hasSize(4); + + @SuppressWarnings("UnnecessaryLocalVariable") + int expectedDeletedChunksLocation1 = numberOfChunks; + int expectedDeletedChunksLocation2 = numberOfChunks - 1; + int expectedDeletedChunks = expectedDeletedChunksLocation1 + expectedDeletedChunksLocation2; + assertThat(deletedChunkEvents).hasSize(expectedDeletedChunks); + + // group the DeletedChunk events by location + Map> deletedChunksByLocation = getDeletedChunksByLocation(); + + // LOCATION 1 + List deletedLocation1Chunks = deletedChunksByLocation.get(location1); + assertThat(deletedLocation1Chunks).hasSize(expectedDeletedChunksLocation1); + for (int i = 0; i < numberOfChunks; i++) { + DeletedChunk dc = deletedLocation1Chunks.get(i); + int chunkNumber = i + 1; + checkDeletedChunk(dc, location1, chunkNumber, "Deleted Chunk [%s/%s] from (MultiLocationsArchiveStoreFailureImpl/TEST-ARCHIVE-STORE-ID/%s)".formatted(chunkNumber, numberOfChunks, location1)); + } + + // LOCATION 2 + List deletedLocation2Chunks = deletedChunksByLocation.getOrDefault(location2, Collections.emptyList()); + assertThat(deletedLocation2Chunks).hasSize(expectedDeletedChunksLocation2); + + // LOCATION 2 : 1 .. errorChunkNumber-1 + for (int i = 0; i < errorChunkNumber - 1; i++) { + DeletedChunk dc = deletedLocation2Chunks.get(i); + int chunkNumber = i + 1; + checkDeletedChunk(dc, location2, chunkNumber, "Deleted Chunk [%s/%s] from (MultiLocationsArchiveStoreFailureImpl/TEST-ARCHIVE-STORE-ID/%s)".formatted(chunkNumber, numberOfChunks, location2)); + } + + // LOCATION 2 : errorChunkNumber+1 .. numberOfChunks
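+ // (the sorted location-two events skip the failed chunk, so chunkNumber here is i + 2)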
+ for (int i = errorChunkNumber - 1; i < numberOfChunks - 1; i++) { + DeletedChunk dc = deletedLocation2Chunks.get(i); + int chunkNumber = i + 2; + checkDeletedChunk(dc, location2, chunkNumber, "Deleted Chunk [%s/%s] from (MultiLocationsArchiveStoreFailureImpl/TEST-ARCHIVE-STORE-ID/%s)".formatted(chunkNumber, numberOfChunks, location2)); + } + + // check that there's no DeletedChunk for location2 and errorChunkNumber + Optional optionalDeletedChunk = deletedLocation2Chunks.stream() + .filter(dc -> dc.getChunkNumber().equals(errorChunkNumber)) + .findFirst(); + assertThat(optionalDeletedChunk).isEmpty(); + + verify(mContext, times(2)).isChunkingEnabled(); + verify(mEventSender, atLeast(4 + expectedDeletedChunks)).send(any(Event.class)); + + Error error = findDeleteError(); + checkError(error, location2, errorChunkNumber, "Deposit delete failed: " + expectedDteMessage); + + pairUpDeletedChunksAndDeletedFilesWithLocation((numberOfChunks * 2) - 1, deletedChunksByLocation); + } + + @Order(5) + @ParameterizedTest + @ValueSource(ints = {1, 10, 50, 100, 1000}) + @SneakyThrows + void testFailureWithChunksButNoDeletedChunkEventsSent(int numberOfChunks) { + assertThat(numberOfChunks).isGreaterThan(0); + // put the ERROR_CHUNK_NUMBER into the 'fake archive' so it knows when to throw an error + int errorChunkNumber = getErrorChunkNumber(numberOfChunks); + archiveStoreFailure.getProperties().put(ERROR_CHUNK_NUMBER, String.valueOf(errorChunkNumber)); + + ch.qos.logback.classic.Logger deleteLogger = (ch.qos.logback.classic.Logger)LoggerFactory.getLogger(Delete.class); + ListAppender listAppender = new ListAppender<>(); + deleteLogger.addAppender(listAppender); + listAppender.start(); + + Delete delete = createDeleteNoDeletedChunks(archiveStoreFailure, numberOfChunks); + String expectedDteMessage = "ArchiveStore[MultiLocationsArchiveStoreFailureImpl/TEST-ARCHIVE-STORE-ID]Location[%s]ChunkNum[%d]Cause[java.lang.RuntimeException/oops@%d/%s]".formatted(location2, errorChunkNumber, errorChunkNumber, location2); + performDeleteAndCheckForDeleteFileException(delete, expectedDteMessage, errorChunkNumber, location2); + + // non DeletedChunk events + assertThat(nonDeletedChunks).hasSize(4); + + @SuppressWarnings("UnnecessaryLocalVariable") + int expectedDeletedChunksLocation1 = numberOfChunks; + int expectedDeletedChunksLocation2 = numberOfChunks - 1; + int expectedDeletedChunks = expectedDeletedChunksLocation1 + expectedDeletedChunksLocation2; + assertThat(deletedChunkEvents).isEmpty(); + + verify(mContext, times(2)).isChunkingEnabled(); + verify(mEventSender, atLeast(4)).send(any(Event.class)); + + Error error = findDeleteError(); + checkError(error, location2, errorChunkNumber, "Deposit delete failed: " + expectedDteMessage); + + assertThat(deletedFilesByLocation.get(location1)).hasSize(numberOfChunks); + if (numberOfChunks == 1) { + assertThat(deletedFilesByLocation.containsKey(location2)).isFalse(); + } else { + assertThat(deletedFilesByLocation.get(location2)).hasSize(numberOfChunks - 1); + } + + listAppender.stop(); + deleteLogger.detachAppender(listAppender); + List notSendingMessages = listAppender.list.stream() + .map(ILoggingEvent::getFormattedMessage) + .filter(m -> m.startsWith("NOT SENDING")) + .toList(); + assertThat(notSendingMessages).hasSize(expectedDeletedChunks); + + String msgTemplate = "NOT SENDING Deleted Chunk [%d/%d] from (MultiLocationsArchiveStoreFailureImpl/TEST-ARCHIVE-STORE-ID/%s)"; + + List nonSentLocation1messages = notSendingMessages.stream().filter(m -> m.contains(location1)).toList();
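+ // each suppressed DeletedChunk should have produced exactly one NOT SENDING log line per location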
+ assertThat(nonSentLocation1messages).hasSize(expectedDeletedChunksLocation1); + List nonSentLocation2messages = notSendingMessages.stream().filter(m -> m.contains(location2)).toList(); + assertThat(nonSentLocation2messages).hasSize(expectedDeletedChunksLocation2); + // LOCATION 1 + for (int i = 1; i <= numberOfChunks; i++) { + String msg = msgTemplate.formatted(i, numberOfChunks, location1); + assertThat(nonSentLocation1messages.stream().anyMatch(m -> m.endsWith(msg))).isTrue(); + } + // LOCATION 2 + for (int i = 1; i <= numberOfChunks; i++) { + String msg = msgTemplate.formatted(i, numberOfChunks, location2); + if (i != errorChunkNumber) { + assertThat(nonSentLocation2messages.stream().anyMatch(msg::equals)).isTrue(); + } + } + String msg = msgTemplate.formatted(errorChunkNumber, numberOfChunks, location2); + assertThat(nonSentLocation2messages.stream().noneMatch(msg::equals)).isTrue(); + } + + /** + * Subclasses MultiLocalFileSystem to make sure we get the same class/interface hierarchy with ArchiveStore/Device + * We only need to override delete - the override just records each deleted file against its location. + */ + public static class MultiLocationsArchiveStoreSuccessImpl extends MultiLocalFileSystem { + + public MultiLocationsArchiveStoreSuccessImpl(String name, Map config) throws FileNotFoundException { + super(name, config); + } + + @Override + public void delete(String path, File working, Progress progress, String location) { + recordDeletedFile(working, location); + } + + void recordDeletedFile(File working, String location) { + deletedFilesByLocation.computeIfAbsent(location, k -> new CopyOnWriteArrayList<>()).add(working); + } + } + + /** + * Subclasses MultiLocalFileSystem to make sure we get the same class/interface hierarchy with ArchiveStore/Device + * We only need to override delete.
+ * It is set up to throw an Exception when it encounters the chunk numbered : ERROR_CHUNK_NUMBER + * This is hard enough to arrange in a Unit Test - and much harder within an Integration Test + */ + public static class MultiLocationsArchiveStoreFailureImpl extends MultiLocationsArchiveStoreSuccessImpl { + final Pattern regex = Pattern.compile("^(.*)(\\.)(\\d+)$"); + private final Long errorChunkNumber; + + public MultiLocationsArchiveStoreFailureImpl(String name, Map config) throws FileNotFoundException { + super(name, config); + if (config.containsKey(ERROR_CHUNK_NUMBER)) { + this.errorChunkNumber = Long.valueOf(config.get(ERROR_CHUNK_NUMBER)); + } else { + this.errorChunkNumber = null; + } + } + + // we will fail when the 'chunkNumber' matches 'errorChunkNumber' AND the 'location' contains 'two' + @Override + public void delete(String path, File working, Progress progress, String location) { + if (location.contains(LOCATION_ONE)) { + //all location-one tasks will succeed + recordDeletedFile(working, location); + } else { + deleteFromLocationTwo(path, working, progress, location); + } + } + + private void deleteFromLocationTwo(String path, File working, Progress progress, String location) { + Assert.notNull(path, "path cannot be null"); + Assert.notNull(working, "working cannot be null"); + Assert.notNull(progress, "progress cannot be null"); + Assert.isTrue(location.contains(LOCATION_TWO), "unexpected location " + location); + + // if no chunks - then always fail for location-two + if (errorChunkNumber == null) { + throw new RuntimeException("oops@no-chunks/" + location); + } else { + // we have location-two and chunks + Matcher matcher = regex.matcher(path); + Assert.isTrue(matcher.matches(), "failed to find chunkNumber at end of filename!"); + Long chunkNumber = Long.valueOf(matcher.group(3)); + + // task for location-two and errorChunkNumber will fail + if (errorChunkNumber.equals(chunkNumber)) { + throw new RuntimeException("oops@" + chunkNumber + "/" + location); + } else { + recordDeletedFile(working, location); + } + } + } + } + } +} diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/DepositThenDelete.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/DepositThenDelete.java new file mode 100644 index 000000000..b9c8e57fc --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/DepositThenDelete.java @@ -0,0 +1,139 @@ +package org.datavaultplatform.worker.tasks; + +import org.datavaultplatform.common.event.Event; +import org.datavaultplatform.common.event.InitStates; +import org.datavaultplatform.common.event.UpdateProgress; +import org.datavaultplatform.common.event.delete.DeleteComplete; +import org.datavaultplatform.common.event.delete.DeleteStart; +import org.datavaultplatform.common.event.delete.DeletedChunk; +import org.datavaultplatform.common.event.deposit.CompleteCopyUpload; +import org.datavaultplatform.common.storage.ArchiveStore; +import org.datavaultplatform.common.storage.impl.LocalFileSystem; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.*; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +public interface DepositThenDelete { + Logger log = LoggerFactory.getLogger(DepositThenDelete.class); + + long getArchiveCount(); + + Optional getExpectedNumberChunksPerDeposit(); + + List getCopyUploadCompleteEvents(); + + default List getArchiveStoreInformation(){
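+ // default: a single LocalFileSystem archive store with the no-location marker (multi-location tests override this)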
+ return List.of(new ArchiveStoreInformation(LocalFileSystem.class,"ARCHIVE-STORE-DST-ID")); + } + + default void checkDeleteEvents(List events) { + events.sort(Comparator.comparing(Event::getSequence)); + int expectedNumberOfDeletedChunkEvents = getExpectedNumberChunksPerDeposit().orElse(1) * getChunkMultiplier(); + for (int i = 0; i < events.size(); i++) { + log.info("event[{}] {}", i, events.get(i).getClass().getName()); + } + Set uniqueDepositIds = events.stream().map(Event::getDepositId).collect(Collectors.toSet()); + assertThat(uniqueDepositIds).isEqualTo(Set.of("test-deposit-id")); + + Set uniqueJobIds = events.stream().map(Event::getJobId).collect(Collectors.toSet()); + assertThat(uniqueJobIds).hasSize(1); + + int expectedNumberOfEvents = 4 + expectedNumberOfDeletedChunkEvents; + assertThat(events).hasSize(expectedNumberOfEvents); + assertThat(events.get(0).getClass()).isEqualTo(InitStates.class); + assertThat(events.get(1).getClass()).isEqualTo(DeleteStart.class); // DELETE START + assertThat(events.get(2).getClass()).isEqualTo(UpdateProgress.class); // the very first UpdateProgress + + List deletedChunkEvents = new ArrayList<>(); + for (int i = 0; i < expectedNumberOfDeletedChunkEvents; i++) { + Event event = events.get(i + 3); + assertThat(event.getClass()).isEqualTo(DeletedChunk.class); + DeletedChunk deletedChunk = (DeletedChunk) event; + assertThat(deletedChunk.getMessage()).startsWith("Deleted Chunk ["+deletedChunk.getChunkNumber()); + deletedChunkEvents.add(deletedChunk); + } + + long eventsToBeFound = (long) getNumberOfChunks() * getChunkMultiplier(); + for (ArchiveStoreInformation info : getArchiveStoreInformation()) { + for (String location : info.locations()) { + long found = findDeleteChunkEvents(deletedChunkEvents, info.archiveStoreType.getSimpleName(), info.archiveStoreId, location); + assertThat(found).isEqualTo(getNumberOfChunks()); + eventsToBeFound -= found; + } + } + assertThat(eventsToBeFound).isZero(); + + Event lastEventIdx = events.get(expectedNumberOfDeletedChunkEvents + 3); + assertThat(lastEventIdx.getClass()).isEqualTo(DeleteComplete.class); // DELETE COMPLETE + + long start = events.get(1).getTimestamp().getTime(); + long end = events.get(3).getTimestamp().getTime(); + + // check that the UpdateProgress timestamp lies between DeleteStart and the first DeletedChunk event + int firstIndexAfterDeleteStart = 2; + int indexOfFirstDeletedChunk = 3; + for (int i = firstIndexAfterDeleteStart; i < indexOfFirstDeletedChunk; i++) { + Event event = events.get(i); + assertThat(event.getClass()).isEqualTo(UpdateProgress.class); + long ts = event.getTimestamp().getTime(); + assertThat(ts).isBetween(start, end); + } + } + + /** + * Note : we get 1x CopyUploadComplete per archive - not per archive*location combination + */ + default void checkDepositEvents() { + List storedChunksEvents = getCopyUploadCompleteEvents(); + + Function countByChunkNumber = chunkNumber -> storedChunksEvents.stream() + .filter(copyUpload -> Objects.equals(copyUpload.getChunkNumber(), chunkNumber)) + .count(); + + if (getExpectedNumberChunksPerDeposit().isEmpty()) { + long countPerNullChunkNumber = countByChunkNumber.apply(null); + assertThat(countPerNullChunkNumber).isEqualTo(getArchiveCount()); + } else { + int chunksPerDeposit = getExpectedNumberChunksPerDeposit().get(); + for (int i = 0; i < chunksPerDeposit; i++) { + int chunkNumber = i + 1; + long countPerChunkNumber = countByChunkNumber.apply(chunkNumber); + assertThat(countPerChunkNumber) + .as("Chunk number %d", chunkNumber) + .isEqualTo(getArchiveCount()); + }
+ } + } + + private int getChunkMultiplier() { + return getArchiveStoreInformation().stream() + .mapToInt(ArchiveStoreInformation::getChunkMultiplier) + .sum(); + } + + private int getNumberOfChunks() { + return getExpectedNumberChunksPerDeposit().orElse(1); + } + + private long findDeleteChunkEvents(List events, String archiveStoreType, String archiveStoreId, String location){ + return events.stream() + .filter( dc -> dc.getMessage().contains("(%s/%s/%s".formatted(archiveStoreType,archiveStoreId,location))) + .count(); + + } + + record ArchiveStoreInformation(Class archiveStoreType, String archiveStoreId, List locations) { + public ArchiveStoreInformation(Class archiveStoreType, String archiveStoreId) { + this(archiveStoreType, archiveStoreId, List.of(Delete.NO_LOCATION)); + } + + public int getChunkMultiplier() { + return locations.isEmpty() ? 1 : locations.size(); + } + } +} diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenAuditMultiChunkMultiDepositIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenAuditMultiChunkMultiDepositIT.java deleted file mode 100644 index 75bb66f32..000000000 --- a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenAuditMultiChunkMultiDepositIT.java +++ /dev/null @@ -1,93 +0,0 @@ -package org.datavaultplatform.worker.tasks; - -import com.fasterxml.jackson.databind.ObjectMapper; -import lombok.SneakyThrows; -import lombok.extern.slf4j.Slf4j; -import org.datavaultplatform.worker.app.DataVaultWorkerInstanceApp; -import org.datavaultplatform.worker.test.AddTestProperties; -import org.datavaultplatform.worker.utils.DepositEvents; -import org.junit.jupiter.api.Test; -import org.slf4j.Logger; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.boot.test.context.TestConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.test.context.TestPropertySource; - -import java.util.Arrays; -import java.util.List; -import java.util.Optional; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; - -@SpringBootTest(classes = { - DataVaultWorkerInstanceApp.class, - PerformDepositThenAuditMultiChunkMultiDepositIT.TestConfig.class -}) -@Slf4j -@AddTestProperties -@TestPropertySource(properties = "chunking.size=20MB") -public class PerformDepositThenAuditMultiChunkMultiDepositIT extends BasePerformDepositThenAuditIT { - - @Override - void checkChunkingProps(boolean chunkingEnabled, String chunkingByteSize) { - assertTrue(chunkingEnabled); - assertEquals("20MB", chunkingByteSize); - } - - - @Override - Optional getExpectedNumberChunksPerDeposit() { - return Optional.of(3); - } - - List getSourcePaths() { - return Arrays.asList( - SRC_PATH_1, - SRC_PATH_2 - ); - } - - @Test - @SneakyThrows - @Override - void testDepositThenAudit() { - assertEquals(0, destDir.listFiles().length); - - - DepositEvents depositEvents1 = checkDeposit(SRC_PATH_1, BAG_ID_1); - assertEquals(3, destDir.listFiles().length); - - DepositEvents depositEvents2 = checkDeposit(SRC_PATH_2, BAG_ID_2); - assertEquals(6, destDir.listFiles().length); - - String auditMessage = buildAuditMessage(Arrays.asList(depositEvents1, depositEvents2)); - - sendNormalMessage(auditMessage); - checkAudit(); - } - - @SneakyThrows - private DepositEvents checkDeposit(String srcPath, String bagId){ - events.clear(); - String depositMessage = getSampleDepositMessage(srcPath, 
bagId); - Deposit deposit = new ObjectMapper().readValue(depositMessage, Deposit.class); - log.info("depositMessage {}", depositMessage); - sendNormalMessage(depositMessage); - waitUntil(this::foundComplete); - - DepositEvents depositEvents = new DepositEvents(deposit, this.events); - - checkDepositWorkedOkay(srcPath, depositMessage, depositEvents); - - return depositEvents; - } - - @TestConfiguration - static class TestConfig { - @Bean - Logger monitorLogger() { - return log; - } - } -} \ No newline at end of file diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteManyMultiChunksIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteManyMultiChunksIT.java new file mode 100644 index 000000000..f316f0f9d --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteManyMultiChunksIT.java @@ -0,0 +1,44 @@ +package org.datavaultplatform.worker.tasks; + +import lombok.extern.slf4j.Slf4j; +import org.datavaultplatform.worker.app.DataVaultWorkerInstanceApp; +import org.datavaultplatform.worker.test.AddTestProperties; +import org.slf4j.Logger; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.context.annotation.Bean; +import org.springframework.test.context.TestPropertySource; + +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@SpringBootTest(classes = { + DataVaultWorkerInstanceApp.class, + PerformDepositThenDeleteManyMultiChunksIT.TestConfig.class +}) +@Slf4j +@AddTestProperties +@TestPropertySource(properties = "chunking.size=500KB") +class PerformDepositThenDeleteManyMultiChunksIT extends BasePerformDepositThenDeleteIT { + + @Override + void checkChunkingProps(boolean chunkingEnabled, String chunkingByteSize) { + assertTrue(chunkingEnabled); + assertEquals("500KB", chunkingByteSize); + } + + @Override + public Optional getExpectedNumberChunksPerDeposit() { + return Optional.of(101); + } + + @TestConfiguration + static class TestConfig { + @Bean + Logger monitorLogger() { + return log; + } + } +} \ No newline at end of file diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteMultiChunksIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteMultiChunksIT.java new file mode 100644 index 000000000..2ed068d26 --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteMultiChunksIT.java @@ -0,0 +1,44 @@ +package org.datavaultplatform.worker.tasks; + +import lombok.extern.slf4j.Slf4j; +import org.datavaultplatform.worker.app.DataVaultWorkerInstanceApp; +import org.datavaultplatform.worker.test.AddTestProperties; +import org.slf4j.Logger; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.context.annotation.Bean; +import org.springframework.test.context.TestPropertySource; + +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@SpringBootTest(classes = { + DataVaultWorkerInstanceApp.class, + PerformDepositThenDeleteMultiChunksIT.TestConfig.class +}) +@Slf4j +@AddTestProperties 
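+// with a 20MB chunk size the shared test fixture is expected to split into 3 chunks - see getExpectedNumberChunksPerDeposit() below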
+@TestPropertySource(properties = "chunking.size=20MB") +class PerformDepositThenDeleteMultiChunksIT extends BasePerformDepositThenDeleteIT { + + @Override + void checkChunkingProps(boolean chunkingEnabled, String chunkingByteSize) { + assertTrue(chunkingEnabled); + assertEquals("20MB", chunkingByteSize); + } + + @Override + public Optional getExpectedNumberChunksPerDeposit() { + return Optional.of(3); + } + + @TestConfiguration + static class TestConfig { + @Bean + Logger monitorLogger() { + return log; + } + } +} \ No newline at end of file diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteNoChunksIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteNoChunksIT.java new file mode 100644 index 000000000..93ff62cdd --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteNoChunksIT.java @@ -0,0 +1,45 @@ +package org.datavaultplatform.worker.tasks; + +import lombok.extern.slf4j.Slf4j; +import org.datavaultplatform.worker.app.DataVaultWorkerInstanceApp; +import org.datavaultplatform.worker.test.AddTestProperties; +import org.slf4j.Logger; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.context.annotation.Bean; +import org.springframework.test.context.TestPropertySource; + +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; + +@SpringBootTest(classes = { + DataVaultWorkerInstanceApp.class, + PerformDepositThenDeleteNoChunksIT.TestConfig.class +}) +@Slf4j +@AddTestProperties +@TestPropertySource(properties = {"chunking.enabled=false","chunking.size=0"}) +class PerformDepositThenDeleteNoChunksIT extends BasePerformDepositThenDeleteIT { + + @Override + + void checkChunkingProps(boolean chunkingEnabled, String chunkingByteSize) { + assertFalse(chunkingEnabled); + assertEquals("0", chunkingByteSize); + } + + @Override + public Optional getExpectedNumberChunksPerDeposit() { + return Optional.empty(); + } + + @TestConfiguration + static class TestConfig { + @Bean + Logger monitorLogger() { + return log; + } + } +} \ No newline at end of file diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteSingleChunkIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteSingleChunkIT.java new file mode 100644 index 000000000..c654c96b5 --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteSingleChunkIT.java @@ -0,0 +1,43 @@ +package org.datavaultplatform.worker.tasks; + +import lombok.extern.slf4j.Slf4j; +import org.datavaultplatform.worker.app.DataVaultWorkerInstanceApp; +import org.datavaultplatform.worker.test.AddTestProperties; +import org.slf4j.Logger; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.context.annotation.Bean; +import org.springframework.test.context.TestPropertySource; + +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.*; + +@SpringBootTest(classes = { + DataVaultWorkerInstanceApp.class, + PerformDepositThenDeleteSingleChunkIT.TestConfig.class +}) +@Slf4j +@AddTestProperties +@TestPropertySource(properties = "chunking.size=60MB") +class 
PerformDepositThenDeleteSingleChunkIT extends BasePerformDepositThenDeleteIT { + + @Override + void checkChunkingProps(boolean chunkingEnabled, String chunkingByteSize) { + assertTrue(chunkingEnabled); + assertEquals("60MB", chunkingByteSize); + } + + @Override + public Optional getExpectedNumberChunksPerDeposit() { + return Optional.of(1); + } + + @TestConfiguration + static class TestConfig { + @Bean + Logger monitorLogger() { + return log; + } + } +} \ No newline at end of file diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoArchivesManyMultiChunksIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoArchivesManyMultiChunksIT.java new file mode 100644 index 000000000..f5b448dc2 --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoArchivesManyMultiChunksIT.java @@ -0,0 +1,44 @@ +package org.datavaultplatform.worker.tasks; + +import lombok.extern.slf4j.Slf4j; +import org.datavaultplatform.worker.app.DataVaultWorkerInstanceApp; +import org.datavaultplatform.worker.test.AddTestProperties; +import org.slf4j.Logger; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.context.annotation.Bean; +import org.springframework.test.context.TestPropertySource; + +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@SpringBootTest(classes = { + DataVaultWorkerInstanceApp.class, + PerformDepositThenDeleteTwoArchivesManyMultiChunksIT.TestConfig.class +}) +@Slf4j +@AddTestProperties +@TestPropertySource(properties = "chunking.size=500KB") +class PerformDepositThenDeleteTwoArchivesManyMultiChunksIT extends BasePerformDepositThenDeleteTwoArchivesIT { + + @Override + void checkChunkingProps(boolean chunkingEnabled, String chunkingByteSize) { + assertTrue(chunkingEnabled); + assertEquals("500KB", chunkingByteSize); + } + + @Override + public Optional getExpectedNumberChunksPerDeposit() { + return Optional.of(101); + } + + @TestConfiguration + static class TestConfig { + @Bean + Logger monitorLogger() { + return log; + } + } +} \ No newline at end of file diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoArchivesMultiChunksIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoArchivesMultiChunksIT.java new file mode 100644 index 000000000..9956c2609 --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoArchivesMultiChunksIT.java @@ -0,0 +1,44 @@ +package org.datavaultplatform.worker.tasks; + +import lombok.extern.slf4j.Slf4j; +import org.datavaultplatform.worker.app.DataVaultWorkerInstanceApp; +import org.datavaultplatform.worker.test.AddTestProperties; +import org.slf4j.Logger; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.context.annotation.Bean; +import org.springframework.test.context.TestPropertySource; + +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@SpringBootTest(classes = { + DataVaultWorkerInstanceApp.class, + 
PerformDepositThenDeleteTwoArchivesMultiChunksIT.TestConfig.class +}) +@Slf4j +@AddTestProperties +@TestPropertySource(properties = "chunking.size=20MB") +class PerformDepositThenDeleteTwoArchivesMultiChunksIT extends BasePerformDepositThenDeleteTwoArchivesIT { + + @Override + void checkChunkingProps(boolean chunkingEnabled, String chunkingByteSize) { + assertTrue(chunkingEnabled); + assertEquals("20MB", chunkingByteSize); + } + + @Override + public Optional getExpectedNumberChunksPerDeposit() { + return Optional.of(3); + } + + @TestConfiguration + static class TestConfig { + @Bean + Logger monitorLogger() { + return log; + } + } +} \ No newline at end of file diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoArchivesNoChunksIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoArchivesNoChunksIT.java new file mode 100644 index 000000000..f3150b21d --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoArchivesNoChunksIT.java @@ -0,0 +1,47 @@ +package org.datavaultplatform.worker.tasks; + +import lombok.extern.slf4j.Slf4j; +import org.datavaultplatform.worker.app.DataVaultWorkerInstanceApp; +import org.datavaultplatform.worker.test.AddTestProperties; +import org.junit.Ignore; +import org.slf4j.Logger; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.context.annotation.Bean; +import org.springframework.test.context.TestPropertySource; + +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; + +@SpringBootTest(classes = { + DataVaultWorkerInstanceApp.class, + PerformDepositThenDeleteTwoArchivesNoChunksIT.TestConfig.class +}) +@Slf4j +@AddTestProperties +@TestPropertySource(properties = {"chunking.enabled=false","chunking.size=0"}) +@Ignore +class PerformDepositThenDeleteTwoArchivesNoChunksIT extends BasePerformDepositThenDeleteTwoArchivesIT { + + @Override + + void checkChunkingProps(boolean chunkingEnabled, String chunkingByteSize) { + assertFalse(chunkingEnabled); + assertEquals("0", chunkingByteSize); + } + + @Override + public Optional getExpectedNumberChunksPerDeposit() { + return Optional.empty(); + } + + @TestConfiguration + static class TestConfig { + @Bean + Logger monitorLogger() { + return log; + } + } +} \ No newline at end of file diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoArchivesSingleChunkIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoArchivesSingleChunkIT.java new file mode 100644 index 000000000..8a234b726 --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoArchivesSingleChunkIT.java @@ -0,0 +1,44 @@ +package org.datavaultplatform.worker.tasks; + +import lombok.extern.slf4j.Slf4j; +import org.datavaultplatform.worker.app.DataVaultWorkerInstanceApp; +import org.datavaultplatform.worker.test.AddTestProperties; +import org.slf4j.Logger; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.context.annotation.Bean; +import org.springframework.test.context.TestPropertySource; + +import java.util.Optional; + +import static 
org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@SpringBootTest(classes = { + DataVaultWorkerInstanceApp.class, + PerformDepositThenDeleteTwoArchivesSingleChunkIT.TestConfig.class +}) +@Slf4j +@AddTestProperties +@TestPropertySource(properties = "chunking.size=60MB") +class PerformDepositThenDeleteTwoArchivesSingleChunkIT extends BasePerformDepositThenDeleteTwoArchivesIT { + + @Override + void checkChunkingProps(boolean chunkingEnabled, String chunkingByteSize) { + assertTrue(chunkingEnabled); + assertEquals("60MB", chunkingByteSize); + } + + @Override + public Optional getExpectedNumberChunksPerDeposit() { + return Optional.of(1); + } + + @TestConfiguration + static class TestConfig { + @Bean + Logger monitorLogger() { + return log; + } + } +} \ No newline at end of file diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoLocationManyMultiChunksIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoLocationManyMultiChunksIT.java new file mode 100644 index 000000000..675b06a89 --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoLocationManyMultiChunksIT.java @@ -0,0 +1,44 @@ +package org.datavaultplatform.worker.tasks; + +import lombok.extern.slf4j.Slf4j; +import org.datavaultplatform.worker.app.DataVaultWorkerInstanceApp; +import org.datavaultplatform.worker.test.AddTestProperties; +import org.slf4j.Logger; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.context.annotation.Bean; +import org.springframework.test.context.TestPropertySource; + +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@SpringBootTest(classes = { + DataVaultWorkerInstanceApp.class, + PerformDepositThenDeleteTwoLocationManyMultiChunksIT.TestConfig.class +}) +@Slf4j +@AddTestProperties +@TestPropertySource(properties = "chunking.size=500KB") +class PerformDepositThenDeleteTwoLocationManyMultiChunksIT extends BasePerformDepositThenDeleteTwoLocationsIT { + + @Override + void checkChunkingProps(boolean chunkingEnabled, String chunkingByteSize) { + assertTrue(chunkingEnabled); + assertEquals("500KB", chunkingByteSize); + } + + @Override + public Optional getExpectedNumberChunksPerDeposit() { + return Optional.of(101); + } + + @TestConfiguration + static class TestConfig { + @Bean + Logger monitorLogger() { + return log; + } + } +} \ No newline at end of file diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoLocationsMultiChunksIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoLocationsMultiChunksIT.java new file mode 100644 index 000000000..627cdf582 --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoLocationsMultiChunksIT.java @@ -0,0 +1,44 @@ +package org.datavaultplatform.worker.tasks; + +import lombok.extern.slf4j.Slf4j; +import org.datavaultplatform.worker.app.DataVaultWorkerInstanceApp; +import org.datavaultplatform.worker.test.AddTestProperties; +import org.slf4j.Logger; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.context.TestConfiguration; +import 
org.springframework.context.annotation.Bean; +import org.springframework.test.context.TestPropertySource; + +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@SpringBootTest(classes = { + DataVaultWorkerInstanceApp.class, + PerformDepositThenDeleteTwoLocationsMultiChunksIT.TestConfig.class +}) +@Slf4j +@AddTestProperties +@TestPropertySource(properties = "chunking.size=20MB") +class PerformDepositThenDeleteTwoLocationsMultiChunksIT extends BasePerformDepositThenDeleteTwoLocationsIT { + + @Override + void checkChunkingProps(boolean chunkingEnabled, String chunkingByteSize) { + assertTrue(chunkingEnabled); + assertEquals("20MB", chunkingByteSize); + } + + @Override + public Optional getExpectedNumberChunksPerDeposit() { + return Optional.of(3); + } + + @TestConfiguration + static class TestConfig { + @Bean + Logger monitorLogger() { + return log; + } + } +} \ No newline at end of file diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoLocationsNoChunksIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoLocationsNoChunksIT.java new file mode 100644 index 000000000..10e8a629b --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoLocationsNoChunksIT.java @@ -0,0 +1,45 @@ +package org.datavaultplatform.worker.tasks; + +import lombok.extern.slf4j.Slf4j; +import org.datavaultplatform.worker.app.DataVaultWorkerInstanceApp; +import org.datavaultplatform.worker.test.AddTestProperties; +import org.slf4j.Logger; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.context.annotation.Bean; +import org.springframework.test.context.TestPropertySource; + +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; + +@SpringBootTest(classes = { + DataVaultWorkerInstanceApp.class, + PerformDepositThenDeleteTwoLocationsNoChunksIT.TestConfig.class +}) +@Slf4j +@AddTestProperties +@TestPropertySource(properties = {"chunking.enabled=false","chunking.size=0"}) +class PerformDepositThenDeleteTwoLocationsNoChunksIT extends BasePerformDepositThenDeleteIT { + + @Override + + void checkChunkingProps(boolean chunkingEnabled, String chunkingByteSize) { + assertFalse(chunkingEnabled); + assertEquals("0", chunkingByteSize); + } + + @Override + public Optional getExpectedNumberChunksPerDeposit() { + return Optional.empty(); + } + + @TestConfiguration + static class TestConfig { + @Bean + Logger monitorLogger() { + return log; + } + } +} \ No newline at end of file diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoLocationsSingleChunkIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoLocationsSingleChunkIT.java new file mode 100644 index 000000000..3007af37b --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformDepositThenDeleteTwoLocationsSingleChunkIT.java @@ -0,0 +1,44 @@ +package org.datavaultplatform.worker.tasks; + +import lombok.extern.slf4j.Slf4j; +import org.datavaultplatform.worker.app.DataVaultWorkerInstanceApp; +import org.datavaultplatform.worker.test.AddTestProperties; +import org.slf4j.Logger; +import 
org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.context.annotation.Bean; +import org.springframework.test.context.TestPropertySource; + +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@SpringBootTest(classes = { + DataVaultWorkerInstanceApp.class, + PerformDepositThenDeleteTwoLocationsSingleChunkIT.TestConfig.class +}) +@Slf4j +@AddTestProperties +@TestPropertySource(properties = "chunking.size=60MB") +class PerformDepositThenDeleteTwoLocationsSingleChunkIT extends BasePerformDepositThenDeleteTwoLocationsIT { + + @Override + void checkChunkingProps(boolean chunkingEnabled, String chunkingByteSize) { + assertTrue(chunkingEnabled); + assertEquals("60MB", chunkingByteSize); + } + + @Override + public Optional getExpectedNumberChunksPerDeposit() { + return Optional.of(1); + } + + @TestConfiguration + static class TestConfig { + @Bean + Logger monitorLogger() { + return log; + } + } +} \ No newline at end of file diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformMultiDepositDirsThenRetrieveMultiChunkIT.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformMultiDepositDirsThenRetrieveMultiChunkIT.java index d9251f169..63cc6a23d 100644 --- a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformMultiDepositDirsThenRetrieveMultiChunkIT.java +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/PerformMultiDepositDirsThenRetrieveMultiChunkIT.java @@ -44,8 +44,9 @@ Optional getExpectedNumberChunks() { @Override protected void checkDepositEvents() { List storedChunksEvents = getCopyUploadCompleteEvents(); - assertThat(storedChunksEvents.size()).isEqualTo(8); + assertThat(storedChunksEvents).hasSize(8); } + @TestConfiguration static class TestConfig { @Bean diff --git a/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/delete/RetrieveStateTest.java b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/delete/RetrieveStateTest.java new file mode 100644 index 000000000..38207c4e3 --- /dev/null +++ b/datavault-worker/src/test/java/org/datavaultplatform/worker/tasks/delete/RetrieveStateTest.java @@ -0,0 +1,19 @@ +package org.datavaultplatform.worker.tasks.delete; + +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; + +import static org.assertj.core.api.Assertions.assertThat; + +class DeleteStateTest { + + @Test + void testGetDeleteStates(){ + ArrayList states = new ArrayList<>(); + states.add("Deleting from archive"); // 0 + states.add("Delete complete"); // 1 + assertThat(DeleteState.getDeleteStates()).isEqualTo(states); + } + +} \ No newline at end of file diff --git a/datavault-worker/src/test/resources/sampleMessages/sampleDepositMessageTwoArchiveStores.json b/datavault-worker/src/test/resources/sampleMessages/sampleDepositMessageTwoArchiveStores.json new file mode 100644 index 000000000..90df3bca8 --- /dev/null +++ b/datavault-worker/src/test/resources/sampleMessages/sampleDepositMessageTwoArchiveStores.json @@ -0,0 +1,48 @@ +{ + "taskClass" : "org.datavaultplatform.worker.tasks.Deposit", + "jobID" : "test-job-id", + "properties" : { + "bagId" : "bf73a7f5-42d1-4c3f-864a-a171af8373d4", + "vaultMetadata" : "\"VAULT\":\"META-DATA\"", + "depositId" : "deposit-id-123", + "depositMetadata" : "\"DEPOSIT\":\"META-DATA\"", + "userId" : "used-id-one" + }, + 
"fileStorePaths" : [ "FILE-STORE-SRC-ID/src-path-1" ], + "fileUploadPaths" : [ "src-file-upload-handle" ], + "archiveFileStores" : [ { + "id" : "ARCHIVE-STORE-DST-ID-1", + "storageClass" : "org.datavaultplatform.common.storage.impl.LocalFileSystem", + "label" : "ARCHIVE-STORE-DST-LABEL-1", + "retrieveEnabled" : true, + "properties" : { + "rootPath" : "/tmp/dv/dest1" + } + },{ + "id" : "ARCHIVE-STORE-DST-ID-2", + "storageClass" : "org.datavaultplatform.common.storage.impl.LocalFileSystem", + "label" : "ARCHIVE-STORE-DST-LABEL-2", + "retrieveEnabled" : true, + "properties" : { + "rootPath" : "/tmp/dv/dest2" + } + } ], + "userFileStoreProperties" : { + "FILE-STORE-SRC-ID" : { + "rootPath" : "/tmp/dv/src" + } + }, + "userFileStoreClasses" : { + "FILE-STORE-SRC-ID" : "org.datavaultplatform.common.storage.impl.LocalFileSystem" + }, + "chunkFilesDigest" : null, + "tarIV" : null, + "chunksIVs" : null, + "encTarDigest" : null, + "encChunksDigest" : null, + "lastEvent" : null, + "chunksToAudit" : null, + "archiveIds" : null, + "restartArchiveIds" : { }, + "redeliver" : false +} \ No newline at end of file diff --git a/pom.xml b/pom.xml index 39cbf93b4..ef79d4d2e 100644 --- a/pom.xml +++ b/pom.xml @@ -68,7 +68,7 @@ 12.1.0 - 1.20.6 + 1.21.4 2.18.3