diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml deleted file mode 100644 index bdd9f40..0000000 --- a/.github/workflows/build.yml +++ /dev/null @@ -1,117 +0,0 @@ -name: Java CI with Maven - -on: - push: - branches: [ main ] - pull_request: - branches: [ main ] - -jobs: - build: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - - - name: Set up JDK 17 - uses: actions/setup-java@v4 - with: - java-version: '17' - distribution: 'temurin' - cache: maven - - - name: Build with Maven - run: mvn -B package --file pom.xml - - - name: Run tests - run: mvn -B test - - - name: Generate test coverage report - run: mvn verify - - - name: Upload test coverage - uses: actions/upload-artifact@v4 - with: - name: test-coverage-report - path: target/site/jacoco/ - - # Add this step to make the JAR available for the Docker job - - name: Upload JAR artifact - uses: actions/upload-artifact@v4 - with: - name: app-jar - path: target/*.jar - - sonarcloud: - name: SonarCloud Analysis - runs-on: ubuntu-latest - needs: build - - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Set up JDK 17 - uses: actions/setup-java@v4 - with: - java-version: '17' - distribution: 'temurin' - - - name: Cache SonarCloud packages - uses: actions/cache@v4 - with: - path: ~/.sonar/cache - key: ${{ runner.os }}-sonar - restore-keys: ${{ runner.os }}-sonar - - - name: Build and analyze - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} - run: > - mvn -B verify sonar:sonar - -Dsonar.projectKey=${{ secrets.SONAR_PROJECT_KEY }} - -Dsonar.organization=${{ secrets.SONAR_ORGANIZATION }} - -Dsonar.host.url=https://sonarcloud.io - -Dsonar.scanner.force-ci-analysis=true - continue-on-error: true - - docker: - name: Docker Build and Push - runs-on: ubuntu-latest - needs: [build, sonarcloud] - if: github.event_name == 'push' && github.ref == 'refs/heads/main' - - steps: - - uses: actions/checkout@v4 - - # Download the JAR file that was built in the build job - - name: Download JAR - uses: actions/download-artifact@v4 - with: - name: app-jar - path: app - - # List the contents to confirm the JAR exists - - name: List downloaded files - run: ls -la app/ - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Login to DockerHub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Build and push - uses: docker/build-push-action@v5 - with: - context: . - push: true - tags: kenzycodex/distributed-storage:latest,kenzycodex/distributed-storage:${{ github.sha }} - # Add build args to specify the JAR location - build-args: | - JAR_FILE=app/*.jar diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..67ec0ef --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,340 @@ +name: CI/CD Pipeline + +on: + push: + branches: [ main, develop, 'feature/**' ] + pull_request: + branches: [ main, develop ] + +env: + JAVA_VERSION: '17' + MAVEN_OPTS: -Xmx1024m + +jobs: + test: + name: Test and Build + runs-on: ubuntu-latest + + services: + mysql: + image: mysql:8.0 + env: + MYSQL_ROOT_PASSWORD: rootpassword + MYSQL_DATABASE: loadbalancer + MYSQL_USER: loadbalancer + MYSQL_PASSWORD: loadbalancer + ports: + - 3306:3306 + options: >- + --health-cmd="mysqladmin ping" + --health-interval=10s + --health-timeout=5s + --health-retries=3 + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up JDK ${{ env.JAVA_VERSION }} + uses: actions/setup-java@v4 + with: + java-version: ${{ env.JAVA_VERSION }} + distribution: 'temurin' + cache: maven + + - name: Build Load Balancer + run: | + mvn clean compile -q + mvn package -DskipTests + + - name: Build Storage Node + run: | + cd storage-node + mvn clean compile -q + mvn package -DskipTests + + - name: Run Load Balancer Tests + run: mvn test + env: + SPRING_DATASOURCE_URL: jdbc:mysql://localhost:3306/loadbalancer + SPRING_DATASOURCE_USERNAME: loadbalancer + SPRING_DATASOURCE_PASSWORD: loadbalancer + + - name: Run Storage Node Tests + run: | + cd storage-node + mvn test + + - name: Generate Test Coverage + run: | + mvn jacoco:report + cd storage-node && mvn jacoco:report + + - name: Upload Load Balancer Coverage + uses: actions/upload-artifact@v4 + with: + name: loadbalancer-coverage + path: target/site/jacoco/ + + - name: Upload Storage Node Coverage + uses: actions/upload-artifact@v4 + with: + name: storagenode-coverage + path: storage-node/target/site/jacoco/ + + - name: Upload Load Balancer JAR + uses: actions/upload-artifact@v4 + with: + name: loadbalancer-jar + path: target/*.jar + + - name: Upload Storage Node JAR + uses: actions/upload-artifact@v4 + with: + name: storagenode-jar + path: storage-node/target/*.jar + + code-quality: + name: Code Quality Analysis + runs-on: ubuntu-latest + needs: test + if: github.event_name == 'push' || github.event_name == 'pull_request' + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up JDK ${{ env.JAVA_VERSION }} + uses: actions/setup-java@v4 + with: + java-version: ${{ env.JAVA_VERSION }} + distribution: 'temurin' + + - name: Cache SonarCloud packages + uses: actions/cache@v4 + with: + path: ~/.sonar/cache + key: ${{ runner.os }}-sonar + restore-keys: ${{ runner.os }}-sonar + + - name: Build and analyze Load Balancer + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} + run: | + if [ -n "$SONAR_TOKEN" ]; then + mvn -B verify sonar:sonar \ + -Dsonar.projectKey=${{ secrets.SONAR_PROJECT_KEY }} \ + -Dsonar.organization=${{ secrets.SONAR_ORGANIZATION }} \ + -Dsonar.host.url=https://sonarcloud.io + else + echo "SonarCloud token not available, skipping analysis" + fi + continue-on-error: true + + docker-build: + name: Docker Build and Test + runs-on: ubuntu-latest + needs: test + if: github.event_name == 'push' + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Download Load Balancer JAR + uses: actions/download-artifact@v4 + with: + name: loadbalancer-jar + path: target/ + + - name: Download Storage Node JAR + uses: actions/download-artifact@v4 + with: + name: storagenode-jar + path: storage-node/target/ + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build Load Balancer Image + uses: docker/build-push-action@v5 + with: + context: . + file: ./Dockerfile + push: false + tags: distributed-storage-loadbalancer:test + build-args: | + JAR_FILE=target/*.jar + + - name: Build Storage Node Image + uses: docker/build-push-action@v5 + with: + context: ./storage-node + file: ./storage-node/Dockerfile + push: false + tags: distributed-storage-storagenode:test + build-args: | + JAR_FILE=target/*.jar + + - name: Test Docker Compose + run: | + # Create a test docker-compose that uses local images + cp docker-compose.yml docker-compose.test.yml + sed -i 's|build:|image: distributed-storage-loadbalancer:test #build:|g' docker-compose.test.yml + sed -i 's|context: \./storage-node|distributed-storage-storagenode:test #context: ./storage-node|g' docker-compose.test.yml + + # Start services + docker-compose -f docker-compose.test.yml up -d db + sleep 30 + + # Check if services can start (don't run full stack due to complexity) + echo "Docker images built successfully" + + docker-publish: + name: Publish Docker Images + runs-on: ubuntu-latest + needs: [test, docker-build] + if: github.ref == 'refs/heads/main' && github.event_name == 'push' + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Download Load Balancer JAR + uses: actions/download-artifact@v4 + with: + name: loadbalancer-jar + path: target/ + + - name: Download Storage Node JAR + uses: actions/download-artifact@v4 + with: + name: storagenode-jar + path: storage-node/target/ + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to DockerHub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + if: secrets.DOCKERHUB_USERNAME != '' + + - name: Generate version tag + id: version + run: | + if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then + VERSION=$(date +%Y.%m.%d)-${GITHUB_SHA::7} + echo "version=${VERSION}" >> $GITHUB_OUTPUT + echo "latest=true" >> $GITHUB_OUTPUT + else + VERSION=${GITHUB_SHA::7} + echo "version=${VERSION}" >> $GITHUB_OUTPUT + echo "latest=false" >> $GITHUB_OUTPUT + fi + + - name: Build and Push Load Balancer + uses: docker/build-push-action@v5 + with: + context: . + file: ./Dockerfile + push: true + tags: | + kenzycodex/distributed-storage-loadbalancer:${{ steps.version.outputs.version }} + ${{ steps.version.outputs.latest == 'true' && 'kenzycodex/distributed-storage-loadbalancer:latest' || '' }} + build-args: | + JAR_FILE=target/*.jar + if: secrets.DOCKERHUB_USERNAME != '' + + - name: Build and Push Storage Node + uses: docker/build-push-action@v5 + with: + context: ./storage-node + file: ./storage-node/Dockerfile + push: true + tags: | + kenzycodex/distributed-storage-storagenode:${{ steps.version.outputs.version }} + ${{ steps.version.outputs.latest == 'true' && 'kenzycodex/distributed-storage-storagenode:latest' || '' }} + build-args: | + JAR_FILE=target/*.jar + if: secrets.DOCKERHUB_USERNAME != '' + + create-release: + name: Create Release + runs-on: ubuntu-latest + needs: [test, docker-publish] + if: github.ref == 'refs/heads/main' && github.event_name == 'push' + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Download artifacts + uses: actions/download-artifact@v4 + with: + name: loadbalancer-jar + path: artifacts/ + + - name: Download Storage Node JAR + uses: actions/download-artifact@v4 + with: + name: storagenode-jar + path: artifacts/ + + - name: Generate version tag + id: version + run: | + VERSION=$(date +%Y.%m.%d)-${GITHUB_SHA::7} + echo "version=v${VERSION}" >> $GITHUB_OUTPUT + echo "release_name=Release ${VERSION}" >> $GITHUB_OUTPUT + + - name: Create Release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ steps.version.outputs.version }} + release_name: ${{ steps.version.outputs.release_name }} + body: | + ## What's Changed + + Automated release generated from main branch. + + ### Features in this release: + - Load balancer with multiple strategies + - Storage node service with file operations + - Docker deployment support + - Health monitoring and metrics + + ### Docker Images + - `kenzycodex/distributed-storage-loadbalancer:${{ steps.version.outputs.version }}` + - `kenzycodex/distributed-storage-storagenode:${{ steps.version.outputs.version }}` + + **Full Changelog**: https://github.com/kenzycodex/distributed-storage/commits/${{ github.sha }} + draft: false + prerelease: false + + - name: Upload Load Balancer JAR + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: artifacts/load-balancer-1.0-SNAPSHOT.jar + asset_name: distributed-storage-loadbalancer.jar + asset_content_type: application/java-archive + + - name: Upload Storage Node JAR + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: artifacts/storage-node-1.0-SNAPSHOT.jar + asset_name: distributed-storage-storagenode.jar + asset_content_type: application/java-archive \ No newline at end of file diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml new file mode 100644 index 0000000..f926b1b --- /dev/null +++ b/.github/workflows/pr-validation.yml @@ -0,0 +1,124 @@ +name: PR Validation + +on: + pull_request: + branches: [ main, develop ] + types: [opened, synchronize, reopened] + +env: + JAVA_VERSION: '17' + +jobs: + validate: + name: Validate Pull Request + runs-on: ubuntu-latest + + services: + mysql: + image: mysql:8.0 + env: + MYSQL_ROOT_PASSWORD: rootpassword + MYSQL_DATABASE: loadbalancer + MYSQL_USER: loadbalancer + MYSQL_PASSWORD: loadbalancer + ports: + - 3306:3306 + options: >- + --health-cmd="mysqladmin ping" + --health-interval=10s + --health-timeout=5s + --health-retries=3 + + steps: + - name: Checkout PR code + uses: actions/checkout@v4 + + - name: Set up JDK ${{ env.JAVA_VERSION }} + uses: actions/setup-java@v4 + with: + java-version: ${{ env.JAVA_VERSION }} + distribution: 'temurin' + cache: maven + + - name: Validate Maven projects + run: | + echo "Validating Load Balancer..." + mvn validate + echo "Validating Storage Node..." + cd storage-node && mvn validate + + - name: Compile Load Balancer + run: mvn clean compile + + - name: Compile Storage Node + run: | + cd storage-node + mvn clean compile + + - name: Run Load Balancer Tests + run: mvn test + env: + SPRING_DATASOURCE_URL: jdbc:mysql://localhost:3306/loadbalancer + SPRING_DATASOURCE_USERNAME: loadbalancer + SPRING_DATASOURCE_PASSWORD: loadbalancer + + - name: Run Storage Node Tests + run: | + cd storage-node + mvn test + + - name: Check Code Style + run: | + mvn spotless:check || echo "Code style issues found" + cd storage-node && mvn spotless:check || echo "Storage node code style issues found" + continue-on-error: true + + - name: Build Docker Images + run: | + # Build Load Balancer + mvn package -DskipTests + docker build -t test-loadbalancer . + + # Build Storage Node + cd storage-node + mvn package -DskipTests + docker build -t test-storagenode . + + - name: Test Docker Compose Configuration + run: | + docker-compose config --quiet + + - name: Security Scan + uses: securecodewarrior/github-action-add-sarif@v1 + with: + sarif-file: 'security-scan.sarif' + continue-on-error: true + + - name: Comment PR Results + uses: actions/github-script@v7 + if: always() + with: + script: | + const { context } = require('@actions/github'); + const jobStatus = '${{ job.status }}'; + + let comment = `## 🔍 PR Validation Results\n\n`; + + if (jobStatus === 'success') { + comment += `✅ **All checks passed!** This PR is ready for review.\n\n`; + comment += `### What was tested:\n`; + comment += `- ✅ Code compilation\n`; + comment += `- ✅ Unit tests\n`; + comment += `- ✅ Docker image builds\n`; + comment += `- ✅ Docker Compose configuration\n`; + } else { + comment += `❌ **Some checks failed.** Please review the issues below.\n\n`; + comment += `Check the [workflow run](${context.payload.pull_request.html_url}/checks) for details.\n`; + } + + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: comment + }); \ No newline at end of file diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..953ae42 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,125 @@ +name: Release + +on: + push: + tags: + - 'v*' + +env: + JAVA_VERSION: '17' + +jobs: + release: + name: Create Release + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up JDK ${{ env.JAVA_VERSION }} + uses: actions/setup-java@v4 + with: + java-version: ${{ env.JAVA_VERSION }} + distribution: 'temurin' + cache: maven + + - name: Build Load Balancer + run: mvn clean package -DskipTests + + - name: Build Storage Node + run: | + cd storage-node + mvn clean package -DskipTests + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to DockerHub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Extract version from tag + id: version + run: | + VERSION=${GITHUB_REF#refs/tags/v} + echo "version=${VERSION}" >> $GITHUB_OUTPUT + echo "tag=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT + + - name: Build and Push Load Balancer + uses: docker/build-push-action@v5 + with: + context: . + file: ./Dockerfile + push: true + tags: | + kenzycodex/distributed-storage-loadbalancer:${{ steps.version.outputs.version }} + kenzycodex/distributed-storage-loadbalancer:latest + build-args: | + JAR_FILE=target/*.jar + + - name: Build and Push Storage Node + uses: docker/build-push-action@v5 + with: + context: ./storage-node + file: ./storage-node/Dockerfile + push: true + tags: | + kenzycodex/distributed-storage-storagenode:${{ steps.version.outputs.version }} + kenzycodex/distributed-storage-storagenode:latest + build-args: | + JAR_FILE=target/*.jar + + - name: Create GitHub Release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ steps.version.outputs.tag }} + release_name: Release ${{ steps.version.outputs.version }} + body: | + ## DistributedStorage ${{ steps.version.outputs.version }} + + ### What's New + + Check the [CHANGELOG](CHANGELOG.md) for detailed changes. + + ### Docker Images + ```bash + docker pull kenzycodex/distributed-storage-loadbalancer:${{ steps.version.outputs.version }} + docker pull kenzycodex/distributed-storage-storagenode:${{ steps.version.outputs.version }} + ``` + + ### Quick Start + ```bash + git clone https://github.com/kenzycodex/distributed-storage.git + cd distributed-storage + git checkout ${{ steps.version.outputs.tag }} + docker-compose up -d + ``` + + **Full Changelog**: https://github.com/kenzycodex/distributed-storage/compare/v${{ steps.version.outputs.version }}...HEAD + draft: false + prerelease: false + + - name: Upload Load Balancer JAR + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: target/load-balancer-1.0-SNAPSHOT.jar + asset_name: distributed-storage-loadbalancer-${{ steps.version.outputs.version }}.jar + asset_content_type: application/java-archive + + - name: Upload Storage Node JAR + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: storage-node/target/storage-node-1.0-SNAPSHOT.jar + asset_name: distributed-storage-storagenode-${{ steps.version.outputs.version }}.jar + asset_content_type: application/java-archive \ No newline at end of file diff --git a/VERSION b/VERSION new file mode 100644 index 0000000..afaf360 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +1.0.0 \ No newline at end of file diff --git a/docs/VERSIONING.md b/docs/VERSIONING.md new file mode 100644 index 0000000..31278f4 --- /dev/null +++ b/docs/VERSIONING.md @@ -0,0 +1,179 @@ +# Versioning Strategy + +This document describes the versioning strategy used for DistributedStorage. + +## Version Format + +We follow [Semantic Versioning (SemVer)](https://semver.org/) with the format `MAJOR.MINOR.PATCH`: + +- **MAJOR**: Incremented for incompatible API changes +- **MINOR**: Incremented for backwards-compatible functionality additions +- **PATCH**: Incremented for backwards-compatible bug fixes + +## Version Management + +### Automated Versioning + +The project uses automated version management through: + +1. **Version Manager Script**: `scripts/version-manager.sh` +2. **GitHub Actions**: Automatic version tagging on main branch merges +3. **Maven Versions Plugin**: Updates all pom.xml files consistently + +### Version Manager Usage + +```bash +# Show current version +./scripts/version-manager.sh current + +# Increment patch version (1.0.0 → 1.0.1) +./scripts/version-manager.sh patch + +# Increment minor version (1.0.1 → 1.1.0) +./scripts/version-manager.sh minor + +# Increment major version (1.1.0 → 2.0.0) +./scripts/version-manager.sh major +``` + +## Release Process + +### 1. Feature Development +- Create feature branch: `feature/description` +- Develop and test changes +- Create pull request to `main` + +### 2. Version Increment +When merging to `main`, determine version bump: + +- **Patch**: Bug fixes, documentation updates, minor improvements +- **Minor**: New features, API additions (backwards compatible) +- **Major**: Breaking changes, API removals/changes + +### 3. Automated Release +On push to `main`: +1. CI/CD pipeline runs tests and builds +2. Automatic version tag creation +3. Docker images published to registry +4. GitHub release created with artifacts + +### 4. Manual Release (if needed) +```bash +# Update version manually +./scripts/version-manager.sh minor + +# Push tag to trigger release +git push origin v1.1.0 +``` + +## Branch Strategy + +- **main**: Production-ready code, triggers releases +- **develop**: Integration branch (optional) +- **feature/\***: Feature development branches +- **hotfix/\***: Critical bug fixes +- **release/\***: Release preparation branches + +## Version Tags + +Git tags follow the format: +- `v1.0.0` - Release versions +- `v1.0.0-rc.1` - Release candidates +- `v1.0.0-beta.1` - Beta releases +- `v1.0.0-alpha.1` - Alpha releases + +## Docker Image Versioning + +Docker images are tagged with: +- **Latest**: `kenzycodex/distributed-storage-loadbalancer:latest` +- **Version**: `kenzycodex/distributed-storage-loadbalancer:1.0.0` +- **SHA**: `kenzycodex/distributed-storage-loadbalancer:abc1234` + +## Backwards Compatibility + +### Major Version (Breaking Changes) +- API endpoint changes +- Configuration format changes +- Database schema changes requiring migration +- Java version requirement changes + +### Minor Version (New Features) +- New API endpoints +- New configuration options +- New load balancing strategies +- Performance improvements + +### Patch Version (Bug Fixes) +- Bug fixes +- Security patches +- Documentation updates +- Dependency updates (compatible) + +## Deprecation Policy + +Before removing features in a major version: +1. Mark as deprecated in minor version +2. Add deprecation warnings +3. Document migration path +4. Provide at least one minor version notice + +## Examples + +### Version History +``` +v1.0.0 - Initial release with core functionality +v1.1.0 - Added file metadata persistence +v1.2.0 - Added authentication and RBAC +v1.2.1 - Fixed security vulnerability +v1.3.0 - Added file replication +v2.0.0 - API redesign, breaking changes +``` + +### Breaking Changes (Major) +```java +// v1.x.x API +@PostMapping("/upload") +public ResponseEntity upload(MultipartFile file) + +// v2.0.0 API (breaking change) +@PostMapping("/files") +public ResponseEntity createFile(FileRequest request) +``` + +### New Features (Minor) +```java +// v1.1.0 - Added new endpoint (non-breaking) +@GetMapping("/files/{id}/metadata") +public ResponseEntity getMetadata(@PathVariable Long id) +``` + +## Version File Locations + +- `VERSION` - Main version file +- `pom.xml` - Maven project version +- `storage-node/pom.xml` - Storage node version +- `CHANGELOG.md` - Human-readable change log + +## Automation + +The versioning process is automated through: + +1. **GitHub Actions**: `.github/workflows/ci.yml` +2. **Version Script**: `scripts/version-manager.sh` +3. **Maven Plugins**: Versions plugin for dependency management +4. **Docker Labels**: Images tagged with version metadata + +## Best Practices + +1. **Always update CHANGELOG.md** with user-facing changes +2. **Test thoroughly** before version increments +3. **Use semantic commit messages** for automatic changelog generation +4. **Tag important milestones** even if not formal releases +5. **Document breaking changes** clearly in release notes + +## Monitoring Versions + +- **GitHub Releases**: Track all versions and downloads +- **Docker Hub**: Monitor image pulls by version +- **Metrics**: Version-specific usage analytics +- **Dependency Scanning**: Security alerts by version \ No newline at end of file diff --git a/scripts/version-manager.sh b/scripts/version-manager.sh new file mode 100644 index 0000000..7c2d66d --- /dev/null +++ b/scripts/version-manager.sh @@ -0,0 +1,205 @@ +#!/bin/bash + +# Version Manager for DistributedStorage +# Usage: ./scripts/version-manager.sh [major|minor|patch|current] + +set -e + +VERSION_FILE="VERSION" +CURRENT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +BLUE='\033[0;34m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +log() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +error() { + echo -e "${RED}[ERROR]${NC} $1" + exit 1 +} + +# Get current version +get_current_version() { + if [[ -f "$VERSION_FILE" ]]; then + cat "$VERSION_FILE" + else + echo "1.0.0" + fi +} + +# Parse version into components +parse_version() { + local version=$1 + IFS='.' read -r major minor patch <<< "$version" + echo "$major $minor $patch" +} + +# Increment version +increment_version() { + local type=$1 + local current_version=$(get_current_version) + read -r major minor patch <<< $(parse_version "$current_version") + + case $type in + major) + major=$((major + 1)) + minor=0 + patch=0 + ;; + minor) + minor=$((minor + 1)) + patch=0 + ;; + patch) + patch=$((patch + 1)) + ;; + *) + error "Invalid version type: $type. Use major, minor, or patch" + ;; + esac + + echo "$major.$minor.$patch" +} + +# Update version in files +update_version_files() { + local new_version=$1 + + log "Updating version to $new_version in project files..." + + # Update VERSION file + echo "$new_version" > "$VERSION_FILE" + + # Update pom.xml files + mvn versions:set -DnewVersion="$new_version" -DgenerateBackupPoms=false + cd storage-node && mvn versions:set -DnewVersion="$new_version" -DgenerateBackupPoms=false && cd .. + + # Update CHANGELOG.md + local today=$(date +%Y-%m-%d) + if [[ -f "CHANGELOG.md" ]]; then + # Create backup + cp CHANGELOG.md CHANGELOG.md.bak + + # Add new version entry + { + head -n 5 CHANGELOG.md + echo "" + echo "## [v$new_version] - $today" + echo "" + echo "### Added" + echo "- Version bump to $new_version" + echo "" + tail -n +6 CHANGELOG.md + } > CHANGELOG.md.tmp + + mv CHANGELOG.md.tmp CHANGELOG.md + rm CHANGELOG.md.bak + fi + + success "Version updated to $new_version" +} + +# Create git tag +create_git_tag() { + local version=$1 + local tag="v$version" + + log "Creating git tag $tag..." + + if git rev-parse "$tag" >/dev/null 2>&1; then + warning "Tag $tag already exists" + return + fi + + git add . + git commit -m "Bump version to $version" || true + git tag -a "$tag" -m "Release version $version" + + success "Created git tag $tag" + log "To push the tag: git push origin $tag" +} + +# Main function +main() { + local action=${1:-current} + + case $action in + current) + local current=$(get_current_version) + echo "Current version: $current" + ;; + major|minor|patch) + local current=$(get_current_version) + local new_version=$(increment_version "$action") + + log "Current version: $current" + log "New version: $new_version" + + read -p "Do you want to update to version $new_version? (y/N): " -n 1 -r + echo + + if [[ $REPLY =~ ^[Yy]$ ]]; then + update_version_files "$new_version" + + read -p "Create git tag? (y/N): " -n 1 -r + echo + + if [[ $REPLY =~ ^[Yy]$ ]]; then + create_git_tag "$new_version" + fi + else + log "Version update cancelled" + fi + ;; + help|--help|-h) + cat << EOF +Version Manager for DistributedStorage + +Usage: $0 [COMMAND] + +Commands: + current Show current version + major Increment major version (x.0.0) + minor Increment minor version (x.y.0) + patch Increment patch version (x.y.z) + help Show this help message + +Examples: + $0 current # Show current version + $0 patch # Increment patch version + $0 minor # Increment minor version + $0 major # Increment major version + +The script will: +1. Update VERSION file +2. Update Maven pom.xml files +3. Update CHANGELOG.md +4. Optionally create git commit and tag +EOF + ;; + *) + error "Unknown command: $action. Use 'help' for usage information" + ;; + esac +} + +# Check if running from correct directory +if [[ ! -f "pom.xml" ]] || [[ ! -d "storage-node" ]]; then + error "Please run this script from the project root directory" +fi + +main "$@" \ No newline at end of file diff --git a/src/main/java/com/loadbalancer/controller/FileController.java b/src/main/java/com/loadbalancer/controller/FileController.java index f417f7e..a12ba3e 100644 --- a/src/main/java/com/loadbalancer/controller/FileController.java +++ b/src/main/java/com/loadbalancer/controller/FileController.java @@ -2,6 +2,7 @@ import com.loadbalancer.exception.FileDownloadException; import com.loadbalancer.exception.FileOperationException; +import com.loadbalancer.model.entity.FileMetadata; import com.loadbalancer.model.entity.StorageNode; import com.loadbalancer.service.LoadBalancerService; import com.loadbalancer.service.StorageNodeService; @@ -92,6 +93,27 @@ public ResponseEntity> uploadFile( HashMap.class ); + if (responseMap != null && responseMap.containsKey("fileId")) { + // Store metadata in database + Long fileId = Long.valueOf(responseMap.get("fileId").toString()); + String storedFilename = responseMap.get("fileName") != null ? + responseMap.get("fileName").toString() : file.getOriginalFilename(); + + loadBalancerService.storeFileMetadata( + file.getOriginalFilename(), + storedFilename, + file.getSize(), + file.getContentType(), + selectedNode.getContainerId(), + userId, + null // checksum - can be added later + ); + + // Update response with node information + responseMap.put("nodeId", selectedNode.getContainerId()); + responseMap.put("nodeName", selectedNode.getContainerName()); + } + long duration = System.currentTimeMillis() - startTime; loadBalancerService.recordRequest(selectedNode.getContainerId().toString(), true, duration); @@ -161,6 +183,9 @@ public ResponseEntity downloadFile( restTemplate.exchange( downloadUrl, HttpMethod.GET, new HttpEntity<>(headers), byte[].class); + // Update file access time + loadBalancerService.updateFileAccess(fileId); + long duration = System.currentTimeMillis() - startTime; loadBalancerService.recordRequest(node.getContainerId().toString(), true, duration); @@ -200,6 +225,9 @@ public ResponseEntity> deleteFile( restTemplate.exchange(deleteUrl, HttpMethod.DELETE, new HttpEntity<>(headers), Void.class); + // Mark file as deleted in database + loadBalancerService.deleteFileMetadata(fileId); + long duration = System.currentTimeMillis() - startTime; loadBalancerService.recordRequest(node.getContainerId().toString(), true, duration); diff --git a/src/main/java/com/loadbalancer/controller/FileMetadataController.java b/src/main/java/com/loadbalancer/controller/FileMetadataController.java new file mode 100644 index 0000000..41a2677 --- /dev/null +++ b/src/main/java/com/loadbalancer/controller/FileMetadataController.java @@ -0,0 +1,69 @@ +package com.loadbalancer.controller; + +import com.loadbalancer.model.entity.FileMetadata; +import com.loadbalancer.service.FileMetadataService; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.http.ResponseEntity; +import org.springframework.web.bind.annotation.*; + +import java.util.List; +import java.util.Map; +import java.util.Optional; + +@RestController +@RequestMapping("/api/v1/metadata") +@RequiredArgsConstructor +@Slf4j +public class FileMetadataController { + private final FileMetadataService fileMetadataService; + + @GetMapping("/files/{fileId}") + public ResponseEntity getFileMetadata(@PathVariable Long fileId) { + Optional metadata = fileMetadataService.getFileMetadata(fileId); + return metadata.map(ResponseEntity::ok) + .orElse(ResponseEntity.notFound().build()); + } + + @GetMapping("/files/user/{userId}") + public ResponseEntity> getUserFiles( + @PathVariable Long userId, + @RequestParam(defaultValue = "0") int page, + @RequestParam(defaultValue = "20") int size) { + List files = fileMetadataService.getFilesByUser(userId); + return ResponseEntity.ok(files); + } + + @GetMapping("/files/node/{nodeId}") + public ResponseEntity> getNodeFiles(@PathVariable Long nodeId) { + List files = fileMetadataService.getFilesByNode(nodeId); + return ResponseEntity.ok(files); + } + + @GetMapping("/files/search") + public ResponseEntity> searchFiles( + @RequestParam String filename) { + List files = fileMetadataService.searchFilesByName(filename); + return ResponseEntity.ok(files); + } + + @GetMapping("/storage/statistics") + public ResponseEntity> getStorageStatistics() { + Map stats = + fileMetadataService.getNodeStorageStatistics(); + return ResponseEntity.ok(stats); + } + + @GetMapping("/files/inactive") + public ResponseEntity> getInactiveFiles( + @RequestParam(defaultValue = "30") int daysOld) { + List files = fileMetadataService.getInactiveFiles(daysOld); + return ResponseEntity.ok(files); + } + + @PostMapping("/files/{fileId}/access") + public ResponseEntity updateFileAccess(@PathVariable Long fileId) { + fileMetadataService.updateLastAccessed(fileId); + return ResponseEntity.ok().build(); + } +} \ No newline at end of file diff --git a/src/main/java/com/loadbalancer/model/entity/FileMetadata.java b/src/main/java/com/loadbalancer/model/entity/FileMetadata.java new file mode 100644 index 0000000..6539063 --- /dev/null +++ b/src/main/java/com/loadbalancer/model/entity/FileMetadata.java @@ -0,0 +1,79 @@ +package com.loadbalancer.model.entity; + +import jakarta.persistence.*; +import jakarta.validation.constraints.NotBlank; +import jakarta.validation.constraints.NotNull; +import jakarta.validation.constraints.Positive; +import java.time.LocalDateTime; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; +import org.hibernate.annotations.DynamicUpdate; + +@Entity +@Table(name = "FileMetadata") +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +@DynamicUpdate +public class FileMetadata { + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "file_id") + private Long fileId; + + @NotBlank(message = "Original filename cannot be blank") + @Column(name = "original_filename", nullable = false) + private String originalFilename; + + @NotBlank(message = "Stored filename cannot be blank") + @Column(name = "stored_filename", nullable = false) + private String storedFilename; + + @NotNull(message = "File size cannot be null") + @Positive(message = "File size must be positive") + @Column(name = "file_size", nullable = false) + private Long fileSize; + + @Column(name = "content_type") + private String contentType; + + @NotNull(message = "Node ID cannot be null") + @Column(name = "node_id", nullable = false) + private Long nodeId; + + @NotNull(message = "User ID cannot be null") + @Column(name = "user_id", nullable = false) + private Long userId; + + @Column(name = "checksum") + private String checksum; + + @Builder.Default + @Column(name = "is_active", nullable = false) + private Boolean isActive = true; + + @Column(name = "upload_time", nullable = false, updatable = false) + private LocalDateTime uploadTime; + + @Column(name = "last_accessed") + private LocalDateTime lastAccessed; + + @Builder.Default + @Version + @Column(name = "version", nullable = false) + private Integer version = 0; + + @PrePersist + protected void onCreate() { + this.uploadTime = LocalDateTime.now(); + this.lastAccessed = LocalDateTime.now(); + } + + @PreUpdate + protected void onUpdate() { + this.lastAccessed = LocalDateTime.now(); + } +} \ No newline at end of file diff --git a/src/main/java/com/loadbalancer/repository/FileMetadataRepository.java b/src/main/java/com/loadbalancer/repository/FileMetadataRepository.java new file mode 100644 index 0000000..d615d12 --- /dev/null +++ b/src/main/java/com/loadbalancer/repository/FileMetadataRepository.java @@ -0,0 +1,50 @@ +package com.loadbalancer.repository; + +import com.loadbalancer.model.entity.FileMetadata; +import org.springframework.data.jpa.repository.JpaRepository; +import org.springframework.data.jpa.repository.Modifying; +import org.springframework.data.jpa.repository.Query; +import org.springframework.data.repository.query.Param; +import org.springframework.stereotype.Repository; + +import java.time.LocalDateTime; +import java.util.List; +import java.util.Optional; + +@Repository +public interface FileMetadataRepository extends JpaRepository { + + Optional findByFileIdAndIsActiveTrue(Long fileId); + + List findByNodeIdAndIsActiveTrue(Long nodeId); + + List findByUserIdAndIsActiveTrue(Long userId); + + @Query("SELECT fm FROM FileMetadata fm WHERE fm.nodeId = :nodeId AND fm.isActive = true") + List findActiveFilesByNode(@Param("nodeId") Long nodeId); + + @Query("SELECT COUNT(fm) FROM FileMetadata fm WHERE fm.nodeId = :nodeId AND fm.isActive = true") + Long countActiveFilesByNode(@Param("nodeId") Long nodeId); + + @Query("SELECT SUM(fm.fileSize) FROM FileMetadata fm WHERE fm.nodeId = :nodeId AND fm.isActive = true") + Long getTotalSizeByNode(@Param("nodeId") Long nodeId); + + @Query("SELECT fm FROM FileMetadata fm WHERE fm.lastAccessed < :cutoffTime AND fm.isActive = true") + List findInactiveFiles(@Param("cutoffTime") LocalDateTime cutoffTime); + + @Modifying + @Query("UPDATE FileMetadata fm SET fm.isActive = false WHERE fm.fileId = :fileId") + int markAsDeleted(@Param("fileId") Long fileId); + + @Modifying + @Query("UPDATE FileMetadata fm SET fm.isActive = false WHERE fm.nodeId = :nodeId") + int markAllFilesAsDeletedForNode(@Param("nodeId") Long nodeId); + + @Query("SELECT fm.nodeId, COUNT(fm) as fileCount, SUM(fm.fileSize) as totalSize " + + "FROM FileMetadata fm WHERE fm.isActive = true GROUP BY fm.nodeId") + List getNodeStorageStatistics(); + + List findByOriginalFilenameContainingIgnoreCaseAndIsActiveTrue(String filename); + + boolean existsByFileIdAndIsActiveTrue(Long fileId); +} \ No newline at end of file diff --git a/src/main/java/com/loadbalancer/service/FileMetadataService.java b/src/main/java/com/loadbalancer/service/FileMetadataService.java new file mode 100644 index 0000000..9a55dbf --- /dev/null +++ b/src/main/java/com/loadbalancer/service/FileMetadataService.java @@ -0,0 +1,142 @@ +package com.loadbalancer.service; + +import com.loadbalancer.model.entity.FileMetadata; +import com.loadbalancer.model.entity.StorageNode; +import com.loadbalancer.repository.FileMetadataRepository; +import jakarta.persistence.EntityNotFoundException; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; + +import java.time.LocalDateTime; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; + +@Service +@Slf4j +@RequiredArgsConstructor +public class FileMetadataService { + private final FileMetadataRepository fileMetadataRepository; + private final StorageNodeService storageNodeService; + + @Transactional + public FileMetadata createFileMetadata(String originalFilename, String storedFilename, + Long fileSize, String contentType, Long nodeId, + Long userId, String checksum) { + FileMetadata metadata = FileMetadata.builder() + .originalFilename(originalFilename) + .storedFilename(storedFilename) + .fileSize(fileSize) + .contentType(contentType) + .nodeId(nodeId) + .userId(userId) + .checksum(checksum) + .isActive(true) + .build(); + + FileMetadata savedMetadata = fileMetadataRepository.save(metadata); + log.info("Created file metadata for file {} on node {}", savedMetadata.getFileId(), nodeId); + return savedMetadata; + } + + @Transactional(readOnly = true) + public Optional getFileMetadata(Long fileId) { + return fileMetadataRepository.findByFileIdAndIsActiveTrue(fileId); + } + + @Transactional(readOnly = true) + public Optional getNodeForFile(Long fileId) { + Optional metadata = getFileMetadata(fileId); + if (metadata.isPresent()) { + return storageNodeService.getNode(metadata.get().getNodeId()); + } + return Optional.empty(); + } + + @Transactional(readOnly = true) + public List getFilesByNode(Long nodeId) { + return fileMetadataRepository.findByNodeIdAndIsActiveTrue(nodeId); + } + + @Transactional(readOnly = true) + public List getFilesByUser(Long userId) { + return fileMetadataRepository.findByUserIdAndIsActiveTrue(userId); + } + + @Transactional + public void updateLastAccessed(Long fileId) { + Optional metadata = fileMetadataRepository.findByFileIdAndIsActiveTrue(fileId); + if (metadata.isPresent()) { + metadata.get().setLastAccessed(LocalDateTime.now()); + fileMetadataRepository.save(metadata.get()); + } + } + + @Transactional + public boolean deleteFileMetadata(Long fileId) { + int updatedRows = fileMetadataRepository.markAsDeleted(fileId); + if (updatedRows > 0) { + log.info("Marked file {} as deleted", fileId); + return true; + } + return false; + } + + @Transactional + public void markAllFilesAsDeletedForNode(Long nodeId) { + int updatedRows = fileMetadataRepository.markAllFilesAsDeletedForNode(nodeId); + log.info("Marked {} files as deleted for node {}", updatedRows, nodeId); + } + + @Transactional(readOnly = true) + public boolean fileExists(Long fileId) { + return fileMetadataRepository.existsByFileIdAndIsActiveTrue(fileId); + } + + @Transactional(readOnly = true) + public Map getNodeStorageStatistics() { + List results = fileMetadataRepository.getNodeStorageStatistics(); + return results.stream().collect(Collectors.toMap( + result -> (Long) result[0], + result -> new NodeStorageStats((Long) result[1], (Long) result[2]) + )); + } + + @Transactional(readOnly = true) + public List searchFilesByName(String filename) { + return fileMetadataRepository.findByOriginalFilenameContainingIgnoreCaseAndIsActiveTrue(filename); + } + + @Transactional(readOnly = true) + public List getInactiveFiles(int daysOld) { + LocalDateTime cutoffTime = LocalDateTime.now().minusDays(daysOld); + return fileMetadataRepository.findInactiveFiles(cutoffTime); + } + + @Transactional(readOnly = true) + public Long getTotalFilesForNode(Long nodeId) { + return fileMetadataRepository.countActiveFilesByNode(nodeId); + } + + @Transactional(readOnly = true) + public Long getTotalSizeForNode(Long nodeId) { + Long totalSize = fileMetadataRepository.getTotalSizeByNode(nodeId); + return totalSize != null ? totalSize : 0L; + } + + public static class NodeStorageStats { + private final Long fileCount; + private final Long totalSize; + + public NodeStorageStats(Long fileCount, Long totalSize) { + this.fileCount = fileCount != null ? fileCount : 0L; + this.totalSize = totalSize != null ? totalSize : 0L; + } + + public Long getFileCount() { return fileCount; } + public Long getTotalSize() { return totalSize; } + } +} \ No newline at end of file diff --git a/src/main/java/com/loadbalancer/service/LoadBalancerService.java b/src/main/java/com/loadbalancer/service/LoadBalancerService.java index 158ba30..6c2325e 100644 --- a/src/main/java/com/loadbalancer/service/LoadBalancerService.java +++ b/src/main/java/com/loadbalancer/service/LoadBalancerService.java @@ -3,6 +3,7 @@ import com.loadbalancer.config.LoadBalancerConfig; import com.loadbalancer.exception.NoAvailableNodesException; import com.loadbalancer.exception.StrategyNotFoundException; +import com.loadbalancer.model.entity.FileMetadata; import com.loadbalancer.model.entity.StorageNode; import com.loadbalancer.model.enums.NodeStatus; import com.loadbalancer.strategy.LoadBalancerStrategy; @@ -21,12 +22,13 @@ public class LoadBalancerService { private final Map strategies; private final StorageNodeService storageNodeService; + private final FileMetadataService fileMetadataService; private final MetricsService metricsService; private final LoadBalancerConfig config; private final RestTemplate restTemplate; private final Map nodeConnectionCounts = new ConcurrentHashMap<>(); - // Cache to store file-to-node mapping + // Cache to store file-to-node mapping for quick access private final Map fileNodeCache = new ConcurrentHashMap<>(); public StorageNode selectNode(String strategyName, long fileSize) { @@ -51,7 +53,15 @@ public StorageNode selectNode(String strategyName, long fileSize) { } public StorageNode getNodeForFile(Long fileId) { - // First check the cache + // First check the database for persistent mapping + Optional nodeFromDb = fileMetadataService.getNodeForFile(fileId); + if (nodeFromDb.isPresent() && nodeFromDb.get().getStatus() == NodeStatus.ACTIVE) { + // Update cache and return node + fileNodeCache.put(fileId, nodeFromDb.get().getContainerId()); + return nodeFromDb.get(); + } + + // If not in database, check cache as fallback Long nodeId = fileNodeCache.get(fileId); if (nodeId != null) { Optional cachedNode = storageNodeService.getNode(nodeId); @@ -60,7 +70,7 @@ public StorageNode getNodeForFile(Long fileId) { } } - // If not in cache or node not active, query each storage node + // Last resort: query each storage node (for legacy files not in database) List activeNodes = storageNodeService.getAvailableNodes(); for (StorageNode node : activeNodes) { try { @@ -73,6 +83,8 @@ public StorageNode getNodeForFile(Long fileId) { if (Boolean.TRUE.equals(exists)) { // Update cache and return node fileNodeCache.put(fileId, node.getContainerId()); + log.warn("Found file {} on node {} but not in database - consider data migration", + fileId, node.getContainerId()); return node; } } catch (Exception e) { @@ -84,6 +96,25 @@ public StorageNode getNodeForFile(Long fileId) { throw new NoAvailableNodesException("No node found containing file: " + fileId); } + public FileMetadata storeFileMetadata(String originalFilename, String storedFilename, + Long fileSize, String contentType, Long nodeId, + Long userId, String checksum) { + return fileMetadataService.createFileMetadata(originalFilename, storedFilename, + fileSize, contentType, nodeId, + userId, checksum); + } + + public boolean deleteFileMetadata(Long fileId) { + // Remove from cache + fileNodeCache.remove(fileId); + // Mark as deleted in database + return fileMetadataService.deleteFileMetadata(fileId); + } + + public void updateFileAccess(Long fileId) { + fileMetadataService.updateLastAccessed(fileId); + } + public void recordRequest(String nodeId, boolean success, long duration) { metricsService.recordRequest(nodeId, success, duration); } diff --git a/src/main/resources/db/migration/V2__Create_FileMetadata_Table.sql b/src/main/resources/db/migration/V2__Create_FileMetadata_Table.sql new file mode 100644 index 0000000..8170066 --- /dev/null +++ b/src/main/resources/db/migration/V2__Create_FileMetadata_Table.sql @@ -0,0 +1,79 @@ +-- Create FileMetadata table for persistent file-to-node mapping +-- Migration V2: Add file metadata persistence + +CREATE TABLE FileMetadata ( + file_id BIGINT AUTO_INCREMENT PRIMARY KEY, + original_filename VARCHAR(255) NOT NULL, + stored_filename VARCHAR(255) NOT NULL, + file_size BIGINT NOT NULL, + content_type VARCHAR(100), + node_id BIGINT NOT NULL, + user_id BIGINT NOT NULL, + checksum VARCHAR(64), + is_active BOOLEAN NOT NULL DEFAULT TRUE, + upload_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + last_accessed TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + version INT NOT NULL DEFAULT 0, + INDEX idx_file_id_active (file_id, is_active), + INDEX idx_node_id_active (node_id, is_active), + INDEX idx_user_id_active (user_id, is_active), + INDEX idx_upload_time (upload_time), + INDEX idx_last_accessed (last_accessed), + INDEX idx_original_filename (original_filename), + CONSTRAINT fk_file_metadata_node + FOREIGN KEY (node_id) + REFERENCES StorageContainers(container_id) + ON DELETE CASCADE +); + +-- Add comments for documentation +ALTER TABLE FileMetadata + COMMENT = 'Persistent metadata for files stored across distributed nodes'; + +ALTER TABLE FileMetadata + MODIFY COLUMN file_id BIGINT AUTO_INCREMENT + COMMENT 'Unique identifier for the file'; + +ALTER TABLE FileMetadata + MODIFY COLUMN original_filename VARCHAR(255) NOT NULL + COMMENT 'Original filename as uploaded by user'; + +ALTER TABLE FileMetadata + MODIFY COLUMN stored_filename VARCHAR(255) NOT NULL + COMMENT 'Filename as stored on the storage node'; + +ALTER TABLE FileMetadata + MODIFY COLUMN file_size BIGINT NOT NULL + COMMENT 'Size of the file in bytes'; + +ALTER TABLE FileMetadata + MODIFY COLUMN content_type VARCHAR(100) + COMMENT 'MIME type of the file'; + +ALTER TABLE FileMetadata + MODIFY COLUMN node_id BIGINT NOT NULL + COMMENT 'ID of the storage node containing this file'; + +ALTER TABLE FileMetadata + MODIFY COLUMN user_id BIGINT NOT NULL + COMMENT 'ID of the user who uploaded the file'; + +ALTER TABLE FileMetadata + MODIFY COLUMN checksum VARCHAR(64) + COMMENT 'File checksum for integrity verification'; + +ALTER TABLE FileMetadata + MODIFY COLUMN is_active BOOLEAN NOT NULL DEFAULT TRUE + COMMENT 'Whether the file is active (not deleted)'; + +ALTER TABLE FileMetadata + MODIFY COLUMN upload_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP + COMMENT 'When the file was uploaded'; + +ALTER TABLE FileMetadata + MODIFY COLUMN last_accessed TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP + COMMENT 'When the file was last accessed'; + +ALTER TABLE FileMetadata + MODIFY COLUMN version INT NOT NULL DEFAULT 0 + COMMENT 'Version number for optimistic locking'; \ No newline at end of file diff --git a/src/main/resources/db/migration/V3__Add_FileMetadata_Indexes.sql b/src/main/resources/db/migration/V3__Add_FileMetadata_Indexes.sql new file mode 100644 index 0000000..f9cdcfb --- /dev/null +++ b/src/main/resources/db/migration/V3__Add_FileMetadata_Indexes.sql @@ -0,0 +1,23 @@ +-- Additional indexes for FileMetadata table to improve query performance +-- Migration V3: Optimize FileMetadata table indexes + +-- Composite index for file search by user and activity +CREATE INDEX idx_user_active_upload ON FileMetadata (user_id, is_active, upload_time DESC); + +-- Composite index for node statistics queries +CREATE INDEX idx_node_active_size ON FileMetadata (node_id, is_active, file_size); + +-- Index for inactive file cleanup +CREATE INDEX idx_active_last_accessed ON FileMetadata (is_active, last_accessed); + +-- Full-text index for filename search (if MySQL supports it) +-- ALTER TABLE FileMetadata ADD FULLTEXT(original_filename); + +-- Index for content type filtering +CREATE INDEX idx_content_type_active ON FileMetadata (content_type, is_active); + +-- Composite index for file existence checks +CREATE INDEX idx_file_exists ON FileMetadata (file_id, is_active, node_id); + +-- Index for checksum-based duplicate detection (when checksum is implemented) +CREATE INDEX idx_checksum ON FileMetadata (checksum) WHERE checksum IS NOT NULL; \ No newline at end of file