diff --git a/.github/workflows/benchmarks-pr.yml b/.github/workflows/benchmarks-pr.yml
new file mode 100644
index 0000000..401fbf0
--- /dev/null
+++ b/.github/workflows/benchmarks-pr.yml
@@ -0,0 +1,250 @@
+name: PR Performance Validation
+
+on:
+ pull_request:
+ paths:
+ - 'Extensions.Caching.PostgreSql/**'
+ - 'Benchmarks/**'
+ types: [opened, synchronize, labeled]
+
+env:
+ DOTNET_VERSION: '9.0.x'
+
+jobs:
+ check-performance-label:
+ runs-on: ubuntu-latest
+ outputs:
+ should-run: ${{ steps.check.outputs.should-run }}
+ steps:
+ - name: Check if performance testing is requested
+ id: check
+ env:
+ LABELS: ${{ join(github.event.pull_request.labels.*.name, ' ') }}
+ TITLE: ${{ github.event.pull_request.title }}
+ run: |
+
+ if [[ "$LABELS" == *"performance"* ]] || [[ "$TITLE" == *"[perf]"* ]]; then
+ echo "should-run=true" >> $GITHUB_OUTPUT
+ echo "Performance testing requested via label or title"
+ else
+ echo "should-run=false" >> $GITHUB_OUTPUT
+ echo "Performance testing not requested. Add 'performance' label or '[perf]' in title to run benchmarks."
+ fi
+
+ performance-validation:
+ needs: check-performance-label
+ if: needs.check-performance-label.outputs.should-run == 'true'
+ runs-on: ubuntu-latest
+ timeout-minutes: 45
+
+ permissions:
+ pull-requests: write
+ contents: read
+
+ steps:
+ - name: Checkout PR branch
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ github.event.pull_request.head.sha }}
+
+ - name: Setup .NET
+ uses: actions/setup-dotnet@v4
+ with:
+ dotnet-version: ${{ env.DOTNET_VERSION }}
+
+ - name: Restore dependencies
+ run: dotnet restore Benchmarks/Benchmarks.csproj
+
+ - name: Build benchmarks
+ run: dotnet build Benchmarks/Benchmarks.csproj --configuration Release --no-restore
+
+ - name: Run core operations benchmark
+ run: dotnet run --project Benchmarks/Benchmarks.csproj --configuration Release --no-build -- core
+
+ - name: Download previous benchmark data for comparison
+ uses: dawidd6/action-download-artifact@v3
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ workflow: benchmarks-scheduled.yml
+ branch: gh-pages
+ name: github-pages
+ path: ./previous-data
+ continue-on-error: true
+
+ - name: Store benchmark result with comparison
+ uses: benchmark-action/github-action-benchmark@v1
+ with:
+ name: 'core-benchmark-pr'
+ tool: 'benchmarkdotnet'
+ output-file-path: Benchmarks/BenchmarkDotNet.Artifacts/results/Benchmarks.UseCases.CoreOperationsBenchmark-report-full-compressed.json
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ auto-push: false # Don't push PR results to main data
+ # Show comparison with baseline in PR comments
+ alert-threshold: '120%' # More sensitive for PRs
+ comment-on-alert: true
+ fail-on-alert: false
+ # Reference main branch data for comparison
+ external-data-json-path: './previous-data/benchmarks/core-benchmark.json'
+
+ - name: Find benchmark results
+ id: find-results
+ run: |
+ GITHUB_MD_FILE=$(find Benchmarks/BenchmarkDotNet.Artifacts/results/ -name "*CoreOperationsBenchmark*github.md" | head -1)
+ if [ -f "$GITHUB_MD_FILE" ]; then
+ echo "results-file=$GITHUB_MD_FILE" >> $GITHUB_OUTPUT
+ echo "results-found=true" >> $GITHUB_OUTPUT
+ else
+ echo "results-found=false" >> $GITHUB_OUTPUT
+ fi
+
+ - name: Comment PR with performance results
+ if: steps.find-results.outputs.results-found == 'true'
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const fs = require('fs');
+ const resultsFile = '${{ steps.find-results.outputs.results-file }}';
+
+ try {
+ const results = fs.readFileSync(resultsFile, 'utf8');
+
+ const body = `## π Performance Validation Results
+
+ This PR has been tested for performance impact on core cache operations.
+
+ **Commit:** \`${{ github.event.pull_request.head.sha }}\`
+ **Date:** ${new Date().toISOString()}
+ **Comparison:** vs main branch baseline
+
+ ### Core Operations Benchmark
+
+ ${results}
+
+ ### π Historical Context
+
+ - **[View Trends Dashboard](https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/benchmarks/)**
+ - **[Compare with Baseline](https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/benchmarks/core-benchmark.html)**
+
+ ### Performance Analysis
+
+ ⚡ **Regression Detection:** Automatic alerts trigger if performance degrades by >20%
+ 📈 **Trend Analysis:** Compare this PR against historical performance data
+ 🎯 **Baseline Comparison:** Results measured against main branch performance
+
+ ---
+
+ **Note:** These results are from a GitHub Actions runner and should be used for relative comparison only.
+ For more comprehensive performance testing, consider running the full benchmark suite locally.
+
+ **Need more benchmarks?** Add specific benchmark names to your PR description:
+ - \`datasize\` - Test different payload sizes
+ - \`expiration\` - Test expiration strategies
+ - \`concurrency\` - Test concurrent operations
+ - \`bulk\` - Test bulk operations
+ `;
+
+ // Check if we already commented
+ const comments = await github.rest.issues.listComments({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.issue.number,
+ });
+
+ const existingComment = comments.data.find(comment =>
+ comment.user.login === 'github-actions[bot]' &&
+ comment.body.includes('Performance Validation Results')
+ );
+
+ if (existingComment) {
+ // Update existing comment
+ await github.rest.issues.updateComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ comment_id: existingComment.id,
+ body: body
+ });
+ } else {
+ // Create new comment
+ await github.rest.issues.createComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.issue.number,
+ body: body
+ });
+ }
+ } catch (error) {
+ console.error('Error reading benchmark results:', error);
+
+ await github.rest.issues.createComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.issue.number,
+ body: `## ❌ Performance Validation Failed
+
+ Unable to read benchmark results. Check the [workflow logs](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for details.
+
+ **Commit:** \`${{ github.event.pull_request.head.sha }}\`
+ **Error:** ${error.message}
+
+ **Dashboard:** [View Historical Trends](https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/benchmarks/)
+ `
+ });
+ }
+
+ - name: Upload detailed results
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: pr-performance-results-${{ github.event.pull_request.number }}
+ path: Benchmarks/BenchmarkDotNet.Artifacts/results/
+ retention-days: 14
+
+ performance-guide:
+ needs: check-performance-label
+ if: needs.check-performance-label.outputs.should-run == 'false'
+ runs-on: ubuntu-latest
+
+ permissions:
+ pull-requests: write
+
+ steps:
+ - name: Comment with performance testing guide
+ uses: actions/github-script@v7
+ with:
+ script: |
+ // Check if we already provided guidance
+ const comments = await github.rest.issues.listComments({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.issue.number,
+ });
+
+ const hasGuidance = comments.data.some(comment =>
+ comment.user.login === 'github-actions[bot]' &&
+ comment.body.includes('Performance Testing Available')
+ );
+
+ if (!hasGuidance) {
+ await github.rest.issues.createComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.issue.number,
+ body: `## π Performance Testing Available
+
+ This PR modifies performance-sensitive code. If you want to validate performance impact:
+
+ **Option 1:** Add the \`performance\` label to this PR
+ **Option 2:** Include \`[perf]\` in your PR title
+
+ This will trigger core operations benchmarking with historical comparison to help identify any performance regressions.
+
+ **π View Current Trends:** [Performance Dashboard](https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/benchmarks/)
+
+ **Local testing:** For comprehensive performance analysis, run benchmarks locally:
+ \`\`\`bash
+ cd Benchmarks
+ dotnet run --configuration Release
+ \`\`\`
+ `
+ });
+ }
diff --git a/.github/workflows/benchmarks-release.yml b/.github/workflows/benchmarks-release.yml
new file mode 100644
index 0000000..2b77d98
--- /dev/null
+++ b/.github/workflows/benchmarks-release.yml
@@ -0,0 +1,289 @@
+name: Release Performance Validation
+
+on:
+ release:
+ types: [published, prereleased]
+ workflow_dispatch:
+ inputs:
+ benchmark_filter:
+ description: 'Benchmarks to run'
+ required: false
+ default: 'all'
+ type: choice
+ options:
+ - core
+ - datasize
+ - expiration
+ - concurrency
+ - bulk
+ - all
+ create_release_notes:
+ description: 'Create performance summary for release notes'
+ required: false
+ default: true
+ type: boolean
+
+env:
+ DOTNET_VERSION: '9.0.x'
+
+jobs:
+ comprehensive-benchmarks:
+ runs-on: ubuntu-latest
+ timeout-minutes: 120
+
+ strategy:
+ fail-fast: false
+ matrix:
+ benchmark: ${{ fromJson(github.event.inputs.benchmark_filter == 'all' && '["core", "datasize", "expiration", "concurrency", "bulk"]' || github.event.inputs.benchmark_filter && format('["{0}"]', github.event.inputs.benchmark_filter) || '["core", "datasize", "expiration", "concurrency", "bulk"]') }}
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Setup .NET
+ uses: actions/setup-dotnet@v4
+ with:
+ dotnet-version: ${{ env.DOTNET_VERSION }}
+
+ - name: Restore dependencies
+ run: dotnet restore Benchmarks/Benchmarks.csproj
+
+ - name: Build benchmarks
+ run: dotnet build Benchmarks/Benchmarks.csproj --configuration Release --no-restore
+
+ - name: Run ${{ matrix.benchmark }} benchmark
+ run: dotnet run --project Benchmarks/Benchmarks.csproj --configuration Release --no-build -- ${{ matrix.benchmark }}
+
+ - name: Upload benchmark results
+ uses: actions/upload-artifact@v4
+ with:
+ name: release-benchmark-${{ matrix.benchmark }}
+ path: Benchmarks/BenchmarkDotNet.Artifacts/results/
+ retention-days: 365 # Keep release benchmarks for a year
+
+ - name: Create benchmark summary
+ run: |
+ echo "# ${{ matrix.benchmark }} Benchmark Results" >> $GITHUB_STEP_SUMMARY
+ echo "**Release:** ${{ github.event.release.tag_name || 'Manual Run' }}" >> $GITHUB_STEP_SUMMARY
+ echo "**Date:** $(date)" >> $GITHUB_STEP_SUMMARY
+ echo "**Commit:** ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+
+ # Find and include the GitHub markdown report
+ REPORT_FILE=$(find Benchmarks/BenchmarkDotNet.Artifacts/results/ -name "*github.md" | head -1)
+ if [ -f "$REPORT_FILE" ]; then
+ cat "$REPORT_FILE" >> $GITHUB_STEP_SUMMARY
+ else
+ echo "No markdown report found for ${{ matrix.benchmark }}" >> $GITHUB_STEP_SUMMARY
+ fi
+
+ generate-performance-report:
+ needs: comprehensive-benchmarks
+ runs-on: ubuntu-latest
+ if: always()
+
+ permissions:
+ contents: write
+ pull-requests: write
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Download all benchmark results
+ uses: actions/download-artifact@v4
+ with:
+ pattern: release-benchmark-*
+ path: ./benchmark-results
+
+ - name: Generate comprehensive performance report
+ run: |
+ mkdir -p reports
+
+ RELEASE_TAG="${{ github.event.release.tag_name || 'manual-run' }}"
+ REPORT_FILE="reports/performance-report-${RELEASE_TAG}.md"
+
+ echo "# PostgreSQL Distributed Cache - Performance Report" > "$REPORT_FILE"
+ echo "" >> "$REPORT_FILE"
+ echo "**Release:** ${RELEASE_TAG}" >> "$REPORT_FILE"
+ echo "**Generated:** $(date)" >> "$REPORT_FILE"
+ echo "**Commit:** ${{ github.sha }}" >> "$REPORT_FILE"
+ echo "**Runner:** GitHub Actions (ubuntu-latest)" >> "$REPORT_FILE"
+ echo "" >> "$REPORT_FILE"
+
+ echo "## Executive Summary" >> "$REPORT_FILE"
+ echo "" >> "$REPORT_FILE"
+ echo "This report contains comprehensive performance benchmarks for the PostgreSQL distributed cache library." >> "$REPORT_FILE"
+ echo "All benchmarks were run in Release configuration using BenchmarkDotNet with PostgreSQL TestContainers." >> "$REPORT_FILE"
+ echo "" >> "$REPORT_FILE"
+
+ echo "## Benchmark Results" >> "$REPORT_FILE"
+ echo "" >> "$REPORT_FILE"
+
+ # Process each benchmark type
+ for benchmark_dir in benchmark-results/release-benchmark-*; do
+ if [ -d "$benchmark_dir" ]; then
+ BENCHMARK_NAME=$(basename "$benchmark_dir" | sed 's/release-benchmark-//')
+ echo "### ${BENCHMARK_NAME^} Operations" >> "$REPORT_FILE"
+ echo "" >> "$REPORT_FILE"
+
+ # Find the GitHub markdown report
+ MD_FILE=$(find "$benchmark_dir" -name "*github.md" | head -1)
+ if [ -f "$MD_FILE" ]; then
+ cat "$MD_FILE" >> "$REPORT_FILE"
+ else
+ echo "⚠️ No results found for $BENCHMARK_NAME benchmark" >> "$REPORT_FILE"
+ fi
+ echo "" >> "$REPORT_FILE"
+ fi
+ done
+
+ echo "## Performance Notes" >> "$REPORT_FILE"
+ echo "" >> "$REPORT_FILE"
+ echo "- **Environment**: GitHub Actions Ubuntu runner with 4 vCPUs and 16GB RAM" >> "$REPORT_FILE"
+ echo "- **Database**: PostgreSQL 16 in Docker container" >> "$REPORT_FILE"
+ echo "- **.NET Version**: ${{ env.DOTNET_VERSION }}" >> "$REPORT_FILE"
+ echo "- **Configuration**: Release build with optimizations enabled" >> "$REPORT_FILE"
+ echo "- **Garbage Collection**: Server GC enabled" >> "$REPORT_FILE"
+ echo "" >> "$REPORT_FILE"
+ echo "**Important**: These results are from a virtualized environment and should be used for relative comparison." >> "$REPORT_FILE"
+ echo "For production performance planning, run benchmarks in your target environment." >> "$REPORT_FILE"
+ echo "" >> "$REPORT_FILE"
+
+ echo "## Recommendations" >> "$REPORT_FILE"
+ echo "" >> "$REPORT_FILE"
+ echo "1. **Production Testing**: Validate performance in your production-like environment" >> "$REPORT_FILE"
+ echo "2. **Monitoring**: Implement performance monitoring to track trends over time" >> "$REPORT_FILE"
+ echo "3. **Tuning**: Consider connection pooling and PostgreSQL configuration optimization" >> "$REPORT_FILE"
+ echo "4. **Scaling**: Review concurrent operation performance for your expected load" >> "$REPORT_FILE"
+
+ echo "Generated performance report: $REPORT_FILE"
+
+ - name: Upload performance report
+ uses: actions/upload-artifact@v4
+ with:
+ name: performance-report-${{ github.event.release.tag_name || 'manual' }}
+ path: reports/
+ retention-days: 365
+
+ - name: Add performance summary to release notes
+ if: github.event.release && (github.event.inputs.create_release_notes != 'false')
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const fs = require('fs');
+ const path = require('path');
+
+ try {
+ const reportPath = 'reports/performance-report-' + context.payload.release.tag_name + '.md';
+
+ if (fs.existsSync(reportPath)) {
+ const report = fs.readFileSync(reportPath, 'utf8');
+
+ // Create a condensed version for release notes
+ const summaryLines = report.split('\n').slice(0, 50); // First 50 lines
+ const summary = summaryLines.join('\n');
+
+ const currentRelease = await github.rest.repos.getRelease({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ release_id: context.payload.release.id
+ });
+
+ const currentBody = currentRelease.data.body || '';
+ const performanceSection = '\n\n## π Performance Report\n\n' + summary + '\n\n**π [Download Full Performance Report](https://github.com/' + context.repo.owner + '/' + context.repo.repo + '/actions/runs/' + context.runId + ')**\n\n---';
+
+ await github.rest.repos.updateRelease({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ release_id: context.payload.release.id,
+ body: currentBody + performanceSection
+ });
+
+ console.log('Added performance summary to release notes');
+ } else {
+ console.log('Performance report not found, skipping release notes update');
+ }
+ } catch (error) {
+ console.error('Error updating release notes:', error);
+ // Don't fail the workflow if we can't update release notes
+ }
+
+ - name: Create performance comparison issue
+ if: github.event.release
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const title = 'π Performance Review: ' + context.payload.release.tag_name;
+ const body = '# Performance Review for Release ' + context.payload.release.tag_name + '\n\n' +
+ 'A comprehensive performance benchmark has been completed for this release.\n\n' +
+ '## Quick Actions\n' +
+ '- [ ] Review benchmark results against previous releases\n' +
+ '- [ ] Identify any performance regressions\n' +
+ '- [ ] Document any significant improvements\n' +
+ '- [ ] Update performance baselines if needed\n\n' +
+ '## Benchmark Results\n\n' +
+ 'π **[Download Full Results](https://github.com/' + context.repo.owner + '/' + context.repo.repo + '/actions/runs/' + context.runId + ')**\n\n' +
+ 'The benchmark suite tested:\n' +
+ '- ✅ Core Operations (Get, Set, Delete, Refresh)\n' +
+ '- ✅ Data Size Impact (1KB to 1MB payloads)\n' +
+ '- ✅ Expiration Strategies\n' +
+ '- ✅ Concurrency Performance (2-16 concurrent operations)\n' +
+ '- ✅ Bulk Operations (10-500 operation batches)\n\n' +
+ '## Environment Details\n' +
+ '- **Runtime**: .NET ' + process.env.DOTNET_VERSION + '\n' +
+ '- **Database**: PostgreSQL 16 (TestContainer)\n' +
+ '- **Platform**: GitHub Actions Ubuntu Runner\n' +
+ '- **Configuration**: Release build\n\n' +
+ '---\n\n' +
+ '_This issue was automatically created by the Release Performance Validation workflow._';
+
+ await github.rest.issues.create({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ title: title,
+ body: body,
+ labels: ['performance', 'release', 'review']
+ });
+
+ console.log('Created performance review issue');
+
+ performance-regression-check:
+ needs: comprehensive-benchmarks
+ runs-on: ubuntu-latest
+ if: always()
+
+ steps:
+ - name: Download benchmark results
+ uses: actions/download-artifact@v4
+ with:
+ pattern: release-benchmark-core
+ path: ./benchmark-results
+
+ - name: Basic regression analysis
+ run: |
+ echo "# Performance Regression Analysis" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+
+ # Look for any obvious performance issues in core operations
+ CORE_RESULTS=$(find benchmark-results -name "*CoreOperationsBenchmark*github.md" | head -1)
+
+ if [ -f "$CORE_RESULTS" ]; then
+ echo "## Core Operations Analysis" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+
+ # Extract mean times and look for any operations taking > 100ms
+ if grep -q "| [^|]*| [^|]*| [^|]*[1-9][0-9][0-9]\.[0-9]* ms" "$CORE_RESULTS"; then
+ echo "⚠️ **Potential Performance Issue Detected**" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "Some operations are taking over 100ms. Review the full results for details." >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ else
+ echo "✅ Core operations performance looks good (all under 100ms average)" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ fi
+
+ echo "**Note**: This is a basic automated check. For comprehensive analysis, review the full benchmark results." >> $GITHUB_STEP_SUMMARY
+ else
+ echo "❌ Core benchmark results not found for analysis" >> $GITHUB_STEP_SUMMARY
+ fi
diff --git a/.github/workflows/benchmarks-scheduled.yml b/.github/workflows/benchmarks-scheduled.yml
new file mode 100644
index 0000000..da0ab0c
--- /dev/null
+++ b/.github/workflows/benchmarks-scheduled.yml
@@ -0,0 +1,157 @@
+name: Scheduled Performance Benchmarks
+
+on:
+ schedule:
+ # Run Monday and Thursday at 2 AM UTC to track performance trends
+ - cron: '0 2 * * 1,4'
+ workflow_dispatch:
+ inputs:
+ benchmark_suite:
+ description: 'Benchmark suite to run'
+ required: false
+ default: 'core'
+ type: choice
+ options:
+ - core
+ - datasize
+ - expiration
+ - concurrency
+ - bulk
+ - all
+ push:
+ branches:
+ [
+ master,
+ 79-request-add-benchmarks-and-integrate-performance-testing-into-cicd,
+ ]
+ paths:
+ - 'Extensions.Caching.PostgreSql/**'
+ - 'Benchmarks/**'
+
+env:
+ DOTNET_VERSION: '9.0.x'
+
+jobs:
+ benchmark:
+ runs-on: ubuntu-latest
+ timeout-minutes: 90
+
+ permissions:
+ contents: write
+ deployments: write
+ pages: write
+
+ strategy:
+ matrix:
+ benchmark: ${{ fromJson(github.event.inputs.benchmark_suite == 'all' && '["core", "datasize", "expiration", "concurrency", "bulk"]' || github.event.inputs.benchmark_suite && format('["{0}"]', github.event.inputs.benchmark_suite) || '["core"]') }}
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Setup .NET
+ uses: actions/setup-dotnet@v4
+ with:
+ dotnet-version: ${{ env.DOTNET_VERSION }}
+
+ - name: Restore dependencies
+ run: dotnet restore Benchmarks/Benchmarks.csproj
+
+ - name: Build benchmarks
+ run: dotnet build Benchmarks/Benchmarks.csproj --configuration Release --no-restore
+
+ - name: Run ${{ matrix.benchmark }} benchmark
+ run: dotnet run --project Benchmarks/Benchmarks.csproj --configuration Release --no-build -- ${{ matrix.benchmark }}
+
+ - name: Set benchmark class name
+ id: benchmark-class
+ run: |
+ case "${{ matrix.benchmark }}" in
+ "core") echo "class_name=CoreOperationsBenchmark" >> $GITHUB_OUTPUT ;;
+ "datasize") echo "class_name=DataSizeBenchmark" >> $GITHUB_OUTPUT ;;
+ "expiration") echo "class_name=ExpirationBenchmark" >> $GITHUB_OUTPUT ;;
+ "concurrency") echo "class_name=ConcurrencyBenchmark" >> $GITHUB_OUTPUT ;;
+ "bulk") echo "class_name=BulkOperationsBenchmark" >> $GITHUB_OUTPUT ;;
+ *) echo "class_name=Unknown" >> $GITHUB_OUTPUT ;;
+ esac
+
+ - name: Store benchmark result
+ uses: benchmark-action/github-action-benchmark@v1
+ with:
+ name: ${{ matrix.benchmark }}-benchmark
+ tool: 'benchmarkdotnet'
+ output-file-path: Benchmarks/BenchmarkDotNet.Artifacts/results/Benchmarks.UseCases.${{ steps.benchmark-class.outputs.class_name }}-report-full-compressed.json
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ auto-push: true
+ # Show alert with commit comment on detecting possible performance regression
+ alert-threshold: '150%'
+ comment-on-alert: true
+ fail-on-alert: false
+ # Store data in gh-pages branch
+ gh-pages-branch: 'gh-pages'
+ benchmark-data-dir-path: 'benchmarks'
+
+ - name: Upload benchmark results
+ uses: actions/upload-artifact@v4
+ with:
+ name: benchmark-results-${{ matrix.benchmark }}-${{ github.run_number }}
+ path: Benchmarks/BenchmarkDotNet.Artifacts/results/
+ retention-days: 30
+
+ - name: Create benchmark summary
+ run: |
+ echo "# Benchmark Results: ${{ matrix.benchmark }}" >> $GITHUB_STEP_SUMMARY
+ echo "**Date:** $(date)" >> $GITHUB_STEP_SUMMARY
+ echo "**Commit:** ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "π **[View Historical Trends](https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/benchmarks/)**" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+
+ # Find and include the GitHub markdown report
+ REPORT_FILE=$(find Benchmarks/BenchmarkDotNet.Artifacts/results/ -name "*github.md" | head -1)
+ if [ -f "$REPORT_FILE" ]; then
+ cat "$REPORT_FILE" >> $GITHUB_STEP_SUMMARY
+ else
+ echo "No markdown report found" >> $GITHUB_STEP_SUMMARY
+ fi
+
+ performance-report:
+ needs: benchmark
+ runs-on: ubuntu-latest
+ if: always()
+
+ steps:
+ - name: Download all benchmark results
+ uses: actions/download-artifact@v4
+ with:
+ pattern: benchmark-results-*
+ merge-multiple: true
+ path: ./benchmark-results
+
+ - name: Generate consolidated report
+ run: |
+ mkdir -p reports
+
+ echo "# PostgreSQL Cache Performance Report" > reports/performance-summary.md
+ echo "**Generated:** $(date)" >> reports/performance-summary.md
+ echo "**Commit:** ${{ github.sha }}" >> reports/performance-summary.md
+ echo "**Trigger:** ${{ github.event_name }}" >> reports/performance-summary.md
+ echo "" >> reports/performance-summary.md
+ echo "π **[View Historical Dashboard](https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/benchmarks/)**" >> reports/performance-summary.md
+ echo "" >> reports/performance-summary.md
+
+ # Process each benchmark type
+ for md_file in benchmark-results/*github.md; do
+ if [ -f "$md_file" ]; then
+ echo "## $(basename "$md_file" | sed 's/-report-github.md//')" >> reports/performance-summary.md
+ cat "$md_file" >> reports/performance-summary.md
+ echo "" >> reports/performance-summary.md
+ fi
+ done
+
+ - name: Upload consolidated report
+ uses: actions/upload-artifact@v4
+ with:
+ name: performance-report-${{ github.run_number }}
+ path: reports/performance-summary.md
+ retention-days: 90
diff --git a/.github/workflows/dotnet-core.yml b/.github/workflows/dotnet-core.yml
index 27471d6..faf3e16 100644
--- a/.github/workflows/dotnet-core.yml
+++ b/.github/workflows/dotnet-core.yml
@@ -27,4 +27,4 @@ jobs:
name: Extensions.Caching.PostgreSql
path: ./Extensions.Caching.PostgreSql/bin/Release/Community.Microsoft.Extensions.Caching.PostgreSql.${{ github.event.release.tag_name }}.nupkg
- name: Deploy to Nuget
- run: dotnet nuget push **/*.nupkg --api-key ${{ secrets.NUGET_API_SECRET }} --source https://api.nuget.org/v3/index.json --no-symbols true
+ run: dotnet nuget push **/*.nupkg --api-key ${{ secrets.NUGET_API_SECRET }} --source https://api.nuget.org/v3/index.json --no-symbols
diff --git a/.github/workflows/dotnet-test.yml b/.github/workflows/dotnet-test.yml
index 3e5b90f..f10d073 100644
--- a/.github/workflows/dotnet-test.yml
+++ b/.github/workflows/dotnet-test.yml
@@ -87,6 +87,14 @@ jobs:
name: html-coverage-report
path: ./TestResults/coverage/report
+ - name: Deploy Coverage Report to GitHub Pages
+ if: env.COVERAGE_FILE_PATH != '' && github.ref == 'refs/heads/master'
+ uses: peaceiris/actions-gh-pages@v3
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ publish_dir: ./TestResults/coverage/report
+ destination_dir: coverage
+
- name: Display Coverage Summary
if: env.COVERAGE_FILE_PATH != ''
run: |
diff --git a/.github/workflows/setup-benchmark-dashboard.yml b/.github/workflows/setup-benchmark-dashboard.yml
new file mode 100644
index 0000000..94ade56
--- /dev/null
+++ b/.github/workflows/setup-benchmark-dashboard.yml
@@ -0,0 +1,297 @@
+name: 'Setup Benchmark Dashboard'
+
+on:
+ workflow_dispatch:
+ inputs:
+ force_setup:
+ description: 'Force setup even if already configured'
+ required: false
+ default: false
+ type: boolean
+
+jobs:
+ setup-dashboard:
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write
+ pages: write
+ id-token: write
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Check if already setup
+ id: check-setup
+ run: |
+ if git ls-remote --exit-code --heads origin gh-pages >/dev/null 2>&1 && [ "${{ github.event.inputs.force_setup }}" != "true" ]; then
+ echo "setup-needed=false" >> $GITHUB_OUTPUT
+ echo "gh-pages branch already exists. Skipping setup."
+ else
+ echo "setup-needed=true" >> $GITHUB_OUTPUT
+ echo "Setting up benchmark dashboard..."
+ fi
+
+ - name: Create initial benchmark data structure
+ if: steps.check-setup.outputs.setup-needed == 'true'
+ run: |
+ # Create the benchmarks directory structure
+ mkdir -p benchmarks
+
+ # Create initial data files for each benchmark type
+ cat > benchmarks/core-benchmark.json << 'EOF'
+ []
+ EOF
+
+ cat > benchmarks/datasize-benchmark.json << 'EOF'
+ []
+ EOF
+
+ cat > benchmarks/expiration-benchmark.json << 'EOF'
+ []
+ EOF
+
+ cat > benchmarks/concurrency-benchmark.json << 'EOF'
+ []
+ EOF
+
+ cat > benchmarks/bulk-benchmark.json << 'EOF'
+ []
+ EOF
+
+ - name: Create dashboard index page
+ if: steps.check-setup.outputs.setup-needed == 'true'
+ run: |
+ cat > benchmarks/index.html << 'EOF'
+
+
+
+
+
+ PostgreSQL Cache Performance Dashboard
+
+
+
+
+
+
+
π PostgreSQL Distributed Cache - Performance Dashboard
+
+
+
About this Dashboard:
+ This dashboard shows performance trends for the PostgreSQL distributed cache library.
+ Benchmarks are automatically run on schedule and for releases to track performance over time.
+
+
View Repository β
+
+
+
+
π Core Operations
+
Basic cache operations: Get, Set, Delete, Refresh
+
+
+
+
+
+
+
π¦ Data Size Impact
+
Performance with different payload sizes (1KB to 1MB)
+
+
+
+
+
+
+
β° Expiration Strategies
+
Different cache expiration configurations
+
+
+
+
+
+
+
π Concurrency Performance
+
Performance under concurrent access (2-16 concurrent operations)
+
+
+
+
+
+
+
β‘ Bulk Operations
+
High-throughput scenarios and bulk operations (10-500 items)
+
+
+
+
+
+
+
+
+
+ EOF
+
+ # Replace placeholder with actual repository name
+ sed -i "s/\$GITHUB_REPOSITORY/${{ github.repository }}/g" benchmarks/index.html
+
+ - name: Create README for gh-pages
+ if: steps.check-setup.outputs.setup-needed == 'true'
+ run: |
+ cat > benchmarks/README.md << 'EOF'
+ # Performance Dashboard
+
+ This branch contains the performance benchmark data and dashboard for the PostgreSQL distributed cache library.
+
+ ## View Dashboard
+
+ Visit the live dashboard at: https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/benchmarks/
+
+ ## Data Structure
+
+ - `core-benchmark.json` - Core operations data
+ - `datasize-benchmark.json` - Data size impact data
+ - `expiration-benchmark.json` - Expiration strategies data
+ - `concurrency-benchmark.json` - Concurrency performance data
+ - `bulk-benchmark.json` - Bulk operations data
+ - `index.html` - Dashboard web interface
+
+ ## Automated Updates
+
+ This data is automatically updated by GitHub Actions workflows:
+ - Scheduled runs (Monday/Thursday)
+ - Release validations
+ - Manual triggers
+ EOF
+
+ - name: Deploy to GitHub Pages
+ if: steps.check-setup.outputs.setup-needed == 'true'
+ uses: peaceiris/actions-gh-pages@v3
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ publish_dir: ./benchmarks
+ publish_branch: gh-pages
+ commit_message: 'Initial setup of benchmark dashboard'
+
+ - name: Setup complete
+ if: steps.check-setup.outputs.setup-needed == 'true'
+ run: |
+ echo "## π Benchmark Dashboard Setup Complete!" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "Your performance dashboard has been initialized and will be available at:" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "π **https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/benchmarks/**" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "### Next Steps:" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "1. **Enable GitHub Pages** in repository Settings β Pages" >> $GITHUB_STEP_SUMMARY
+ echo " - Source: Deploy from a branch" >> $GITHUB_STEP_SUMMARY
+ echo " - Branch: gh-pages" >> $GITHUB_STEP_SUMMARY
+ echo " - Folder: / (root)" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "2. **Run your first benchmark** using the scheduled workflow or manually" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "3. **Wait 5-10 minutes** for GitHub Pages to deploy" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "The dashboard will show 'No data available' until you run benchmarks." >> $GITHUB_STEP_SUMMARY
+
+ - name: Already setup
+ if: steps.check-setup.outputs.setup-needed == 'false'
+ run: |
+ echo "## ✅ Dashboard Already Setup" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "Your benchmark dashboard is already configured at:" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "π **https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/benchmarks/**" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "To force re-setup, run this workflow again with 'Force setup' checked." >> $GITHUB_STEP_SUMMARY
diff --git a/.gitignore b/.gitignore
index 9d89c10..88abd6b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -260,4 +260,7 @@ paket-files/
# Python Tools for Visual Studio (PTVS)
__pycache__/
-*.pyc
\ No newline at end of file
+*.pyc
+
+Benchmarks/BenchmarkDotNet.Artifacts/*
+Benchmarks/BenchmarkDotNet.Artifacts/results/*
\ No newline at end of file
diff --git a/AZURE_KEY_VAULT_ROTATION.md b/AZURE_KEY_VAULT_ROTATION.md
new file mode 100644
index 0000000..39a51c8
--- /dev/null
+++ b/AZURE_KEY_VAULT_ROTATION.md
@@ -0,0 +1,369 @@
+# Azure Key Vault Rotation Support
+
+This document explains how to implement connection string reloading for Azure Key Vault rotation scenarios in the PostgreSQL Distributed Cache library.
+
+## Overview
+
+Azure Key Vault rotation is a security best practice that involves periodically updating secrets (like database connection strings) without application downtime. This library provides built-in support for automatically reloading connection strings when they are updated in Azure Key Vault.
+
+## Features
+
+- **Automatic Connection String Reloading**: Periodically checks for updated connection strings in configuration
+- **Configurable Reload Intervals**: Set how often to check for updates (default: 5 minutes)
+- **Thread-Safe Operations**: Safe concurrent access to connection string updates
+- **Comprehensive Logging**: Detailed logging of connection string changes
+- **Graceful Fallback**: Continues using existing connection string if reload fails
+
+## Implementation Approaches
+
+### Approach 1: Using the Reloadable Connection String Extension (Recommended)
+
+This is the simplest approach using the new extension method:
+
+```csharp
+// In Program.cs or Startup.cs
+builder.Services.AddDistributedPostgreSqlCacheWithReloadableConnection(
+ connectionStringKey: "PostgreSqlCache:ConnectionString",
+ reloadInterval: TimeSpan.FromMinutes(5),
+ setupAction: options =>
+ {
+ options.SchemaName = "cache";
+ options.TableName = "cache_items";
+ options.DisableRemoveExpired = false;
+ options.UpdateOnGetCacheItem = true;
+ options.ReadOnlyMode = false;
+ options.CreateInfrastructure = true;
+ });
+```
+
+### Approach 2: Manual Configuration
+
+For more control, configure the reloadable connection string manually:
+
+```csharp
+builder.Services.AddDistributedPostgreSqlCache((serviceProvider, setup) =>
+{
+    var configuration = serviceProvider.GetRequiredService<IConfiguration>();
+    var logger = serviceProvider.GetRequiredService<ILogger<PostgreSqlCache>>();
+
+ // Enable reloadable connection string
+ setup.ConnectionStringKey = "PostgreSqlCache:ConnectionString";
+ setup.Configuration = configuration;
+ setup.Logger = logger;
+ setup.EnableConnectionStringReloading = true;
+ setup.ConnectionStringReloadInterval = TimeSpan.FromMinutes(5);
+
+ // Other configuration options
+ setup.SchemaName = "cache";
+ setup.TableName = "cache_items";
+ setup.DisableRemoveExpired = false;
+ setup.UpdateOnGetCacheItem = true;
+ setup.ReadOnlyMode = false;
+ setup.CreateInfrastructure = true;
+});
+```
+
+## Azure Key Vault Configuration
+
+### 1. Install Required Packages
+
+```bash
+dotnet add package Azure.Security.KeyVault.Secrets
+dotnet add package Azure.Identity
+dotnet add package Azure.Extensions.AspNetCore.Configuration.Secrets
+```
+
+### 2. Configure Azure Key Vault in Program.cs
+
+```csharp
+using Azure.Identity;
+using Azure.Security.KeyVault.Secrets;
+using Microsoft.Extensions.Configuration;
+
+var builder = WebApplication.CreateBuilder(args);
+
+// Configure Azure Key Vault
+var keyVaultUrl = $"https://{builder.Configuration["AzureKeyVault:VaultName"]}.vault.azure.net/";
+var credential = new ClientSecretCredential(
+ builder.Configuration["AzureKeyVault:TenantId"],
+ builder.Configuration["AzureKeyVault:ClientId"],
+ builder.Configuration["AzureKeyVault:ClientSecret"]);
+
+var secretClient = new SecretClient(new Uri(keyVaultUrl), credential);
+
+// Add Azure Key Vault as configuration source
+builder.Configuration.AddAzureKeyVault(secretClient, new AzureKeyVaultConfigurationOptions());
+
+// Configure PostgreSQL cache with reloadable connection string
+builder.Services.AddDistributedPostgreSqlCacheWithReloadableConnection(
+ connectionStringKey: "PostgreSqlCache:ConnectionString",
+ reloadInterval: TimeSpan.FromMinutes(5));
+```
+
+### 3. Configuration Settings
+
+Add the following to your `appsettings.json`:
+
+```json
+{
+ "AzureKeyVault": {
+ "VaultName": "your-key-vault-name",
+ "TenantId": "your-tenant-id",
+ "ClientId": "your-client-id",
+ "ClientSecret": "your-client-secret"
+ },
+ "PgCache": {
+ "ConnectionStringKey": "PostgreSqlCache:ConnectionString",
+ "SchemaName": "cache",
+ "TableName": "cache_items",
+ "EnableConnectionStringReloading": true,
+ "ConnectionStringReloadInterval": "00:05:00"
+ }
+}
+```
+
+## Azure Key Vault Setup
+
+### 1. Create Key Vault Secret
+
+Store your PostgreSQL connection string in Azure Key Vault:
+
+```bash
+# Using Azure CLI
+az keyvault secret set --vault-name "your-key-vault-name" --name "PostgreSqlCache--ConnectionString" --value "Host=your-server;Database=your-db;Username=your-user;Password=your-password"
+```
+
+### 2. Configure Access Policies
+
+Ensure your application has access to read secrets:
+
+```bash
+# Using Azure CLI
+az keyvault set-policy --name "your-key-vault-name" --spn "your-app-service-principal-id" --secret-permissions get list
+```
+
+### 3. Enable Soft Delete (Recommended)
+
+```bash
+az keyvault update --name "your-key-vault-name" --enable-soft-delete true
+```
+
+## Rotation Process
+
+### Manual Rotation
+
+1. **Update the Secret in Azure Key Vault**:
+
+ ```bash
+ az keyvault secret set --vault-name "your-key-vault-name" --name "PostgreSqlCache--ConnectionString" --value "Host=new-server;Database=your-db;Username=your-user;Password=new-password"
+ ```
+
+2. **Application Automatically Picks Up Changes**:
+ - The library checks for updates every 5 minutes (configurable)
+ - When a change is detected, it logs the update
+ - New connections use the updated connection string
+
+### Automated Rotation
+
+For automated rotation, you can:
+
+1. **Use Azure Key Vault Rotation Policies**:
+
+ ```bash
+ az keyvault secret set-attributes --vault-name "your-key-vault-name" --name "PostgreSqlCache--ConnectionString" --expires 2024-12-31T23:59:59Z
+ ```
+
+2. **Implement Custom Rotation Logic**:
+ ```csharp
+ // In your rotation service
+ public async Task RotateConnectionStringAsync()
+ {
+ var newConnectionString = GenerateNewConnectionString();
+ await secretClient.SetSecretAsync("PostgreSqlCache--ConnectionString", newConnectionString);
+ }
+ ```
+
+## Monitoring and Logging
+
+The library provides comprehensive logging for connection string operations:
+
+```csharp
+// Configure logging in appsettings.json
+{
+ "Logging": {
+ "LogLevel": {
+ "Community.Microsoft.Extensions.Caching.PostgreSql": "Information"
+ }
+ }
+}
+```
+
+### Log Messages
+
+- `Connection string updated from configuration key: {Key}` - When connection string is updated
+- `Connection string manually reloaded from configuration key: {Key}` - When manually reloaded
+- `Connection string reload timer triggered for key: {Key}` - Timer events
+- `Connection string not found for key: {Key}` - Configuration issues
+- `Error loading connection string from configuration key: {Key}` - Errors
+
+## Best Practices
+
+### 1. Security
+
+- **Use Managed Identity**: Prefer managed identity over client secrets when possible
+- **Least Privilege**: Grant only necessary permissions to your application
+- **Secret Rotation**: Implement automated rotation policies
+- **Monitoring**: Monitor access to Key Vault secrets
+
+### 2. Performance
+
+- **Reasonable Reload Intervals**: Don't check too frequently (minimum 1 minute recommended)
+- **Connection Pooling**: The library handles connection pooling automatically
+- **Monitoring**: Monitor connection string reload performance
+
+### 3. Reliability
+
+- **Graceful Degradation**: The library continues using existing connections if reload fails
+- **Error Handling**: Comprehensive error handling and logging
+- **Fallback Strategy**: Consider having a fallback connection string
+
+### 4. Configuration
+
+```csharp
+// Recommended configuration
+builder.Services.AddDistributedPostgreSqlCacheWithReloadableConnection(
+ connectionStringKey: "PostgreSqlCache:ConnectionString",
+ reloadInterval: TimeSpan.FromMinutes(5), // Check every 5 minutes
+ setupAction: options =>
+ {
+ options.SchemaName = "cache";
+ options.TableName = "cache_items";
+ options.CreateInfrastructure = true;
+ options.ExpiredItemsDeletionInterval = TimeSpan.FromMinutes(30);
+ options.DefaultSlidingExpiration = TimeSpan.FromMinutes(20);
+ });
+```
+
+## Troubleshooting
+
+### Common Issues
+
+1. **Connection String Not Found**:
+
+ - Verify the configuration key exists in Azure Key Vault
+ - Check application permissions to Key Vault
+ - Ensure the secret name matches the configuration key
+
+2. **Reload Not Working**:
+
+ - Check if `EnableConnectionStringReloading` is set to `true`
+ - Verify `Configuration` and `Logger` are properly set
+ - Check logs for error messages
+
+3. **Performance Issues**:
+ - Increase reload interval if checking too frequently
+ - Monitor connection pool usage
+ - Check for connection leaks
+
+### Debug Configuration
+
+```csharp
+// Enable detailed logging
+builder.Services.AddLogging(logging =>
+{
+ logging.AddConsole();
+ logging.SetMinimumLevel(LogLevel.Debug);
+});
+
+// Add configuration debugging
+builder.Services.AddDistributedPostgreSqlCacheWithReloadableConnection(
+ connectionStringKey: "PostgreSqlCache:ConnectionString",
+ reloadInterval: TimeSpan.FromMinutes(1), // Shorter interval for testing
+ setupAction: options =>
+ {
+ options.Logger.LogInformation("Cache configured with reloadable connection string");
+ });
+```
+
+## Migration from Static Connection Strings
+
+If you're currently using static connection strings, here's how to migrate:
+
+### Before (Static)
+
+```csharp
+builder.Services.AddDistributedPostgreSqlCache(setup =>
+{
+ setup.ConnectionString = "Host=localhost;Database=cache;Username=user;Password=pass";
+ setup.SchemaName = "cache";
+ setup.TableName = "cache_items";
+});
+```
+
+### After (Reloadable)
+
+```csharp
+builder.Services.AddDistributedPostgreSqlCacheWithReloadableConnection(
+ connectionStringKey: "PostgreSqlCache:ConnectionString",
+ setupAction: options =>
+ {
+ options.SchemaName = "cache";
+ options.TableName = "cache_items";
+ });
+```
+
+## Advanced Scenarios
+
+### Custom Reload Logic
+
+For custom reload logic, you can extend the `ReloadableConnectionStringProvider`:
+
+```csharp
+public class CustomConnectionStringProvider : ReloadableConnectionStringProvider
+{
+ public CustomConnectionStringProvider(
+ IConfiguration configuration,
+ ILogger logger,
+ string connectionStringKey,
+ TimeSpan reloadInterval)
+ : base(configuration, logger, connectionStringKey, reloadInterval)
+ {
+ }
+
+ protected override string LoadConnectionString()
+ {
+ // Custom logic here
+ var connectionString = base.LoadConnectionString();
+
+ // Add custom validation or transformation
+ if (string.IsNullOrEmpty(connectionString))
+ {
+ throw new InvalidOperationException("Connection string cannot be empty");
+ }
+
+ return connectionString;
+ }
+}
+```
+
+### Multiple Connection Strings
+
+For applications with multiple databases:
+
+```csharp
+// Primary cache
+builder.Services.AddDistributedPostgreSqlCacheWithReloadableConnection(
+ connectionStringKey: "PrimaryCache:ConnectionString",
+ reloadInterval: TimeSpan.FromMinutes(5));
+
+// Secondary cache
+builder.Services.AddDistributedPostgreSqlCacheWithReloadableConnection(
+ connectionStringKey: "SecondaryCache:ConnectionString",
+ reloadInterval: TimeSpan.FromMinutes(10));
+```
+
+## Conclusion
+
+This implementation provides a robust, secure, and efficient way to handle Azure Key Vault rotation for PostgreSQL connection strings. The automatic reloading mechanism ensures your application stays up-to-date with the latest secrets without manual intervention or application restarts.
+
+For more information about Azure Key Vault rotation, see the [official Microsoft documentation](https://learn.microsoft.com/en-us/azure/key-vault/secrets/tutorial-rotation).
diff --git a/Benchmarks/Benchmarks.csproj b/Benchmarks/Benchmarks.csproj
new file mode 100644
index 0000000..65348bc
--- /dev/null
+++ b/Benchmarks/Benchmarks.csproj
@@ -0,0 +1,31 @@
+ο»Ώ
+
+
+ Exe
+ net9.0
+ enable
+ enable
+ true
+ x64
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/Benchmarks/Fixtures/PostgreSqlBenchmarkFixture.cs b/Benchmarks/Fixtures/PostgreSqlBenchmarkFixture.cs
new file mode 100644
index 0000000..c5909b5
--- /dev/null
+++ b/Benchmarks/Fixtures/PostgreSqlBenchmarkFixture.cs
@@ -0,0 +1,123 @@
+using System.Data.Common;
+using Microsoft.Extensions.Caching.Distributed;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Logging;
+using Testcontainers.PostgreSql;
+using Community.Microsoft.Extensions.Caching.PostgreSql;
+using DotNet.Testcontainers.Builders;
+
+namespace Benchmarks.Fixtures;
+
+///
+/// PostgreSQL TestContainer fixture for benchmarking
+///
+public class PostgreSqlBenchmarkFixture : IAsyncDisposable
+{
+ private readonly PostgreSqlContainer _container;
+ private readonly IServiceProvider _serviceProvider;
+ private readonly ILogger _logger;
+
+ public PostgreSqlBenchmarkFixture()
+ {
+ // Create PostgreSQL container
+ _container = new PostgreSqlBuilder()
+ .WithImage("postgres:16")
+ .WithDatabase("benchmark_db")
+ .WithUsername("benchmark_user")
+ .WithPassword("benchmark_password")
+ .WithPortBinding(5432, true)
+ .WithWaitStrategy(Wait.ForUnixContainer().UntilPortIsAvailable(5432))
+ .Build();
+
+ // Setup service provider
+ var services = new ServiceCollection();
+ services.AddLogging(builder => builder.AddConsole().SetMinimumLevel(LogLevel.Warning));
+
+ _serviceProvider = services.BuildServiceProvider();
+        _logger = _serviceProvider.GetRequiredService<ILogger<PostgreSqlBenchmarkFixture>>();
+ }
+
+ ///
+ /// Gets the connection string for the PostgreSQL container
+ ///
+ public string ConnectionString => _container.GetConnectionString();
+
+ ///
+ /// Gets the PostgreSQL container instance
+ ///
+ public PostgreSqlContainer Container => _container;
+
+ ///
+ /// Initializes the container and creates a distributed cache instance
+ ///
+    public async Task<IDistributedCache> InitializeAsync()
+ {
+ // Start the container
+ await _container.StartAsync();
+
+ // Create service collection and configure PostgreSQL cache
+ var services = new ServiceCollection();
+ services.AddLogging(builder => builder.AddConsole().SetMinimumLevel(LogLevel.Warning));
+
+ services.AddDistributedPostgreSqlCache(options =>
+ {
+ options.ConnectionString = ConnectionString;
+ options.SchemaName = "benchmark_cache";
+ options.TableName = "cache_items";
+ options.CreateInfrastructure = true;
+ options.DefaultSlidingExpiration = TimeSpan.FromMinutes(20);
+ options.ExpiredItemsDeletionInterval = TimeSpan.FromMinutes(5);
+ });
+
+ var serviceProvider = services.BuildServiceProvider();
+        var cache = serviceProvider.GetRequiredService<IDistributedCache>();
+
+ // Ensure the cache is properly initialized
+ await cache.SetStringAsync("init_key", "init_value");
+ await cache.RemoveAsync("init_key");
+
+ return cache;
+ }
+
+ ///
+ /// Creates a new DbConnection to the PostgreSQL container
+ ///
+ public DbConnection CreateConnection()
+ {
+ var connection = new Npgsql.NpgsqlConnection(ConnectionString);
+ return connection;
+ }
+
+ ///
+ /// Cleans up the benchmark database by removing all cache items
+ ///
+ public async Task CleanupAsync()
+ {
+ try
+ {
+ using var connection = CreateConnection();
+ await connection.OpenAsync();
+
+ using var command = connection.CreateCommand();
+ command.CommandText = "DELETE FROM benchmark_cache.cache_items;";
+ await command.ExecuteNonQueryAsync();
+ }
+ catch (Exception ex)
+ {
+ _logger.LogWarning(ex, "Failed to cleanup benchmark database");
+ }
+ }
+
+ public async ValueTask DisposeAsync()
+ {
+ await _container.DisposeAsync();
+ if (_serviceProvider is IAsyncDisposable asyncDisposable)
+ {
+ await asyncDisposable.DisposeAsync();
+ }
+ else if (_serviceProvider is IDisposable disposable)
+ {
+ disposable.Dispose();
+ }
+ }
+}
\ No newline at end of file
diff --git a/Benchmarks/Program.cs b/Benchmarks/Program.cs
new file mode 100644
index 0000000..28fc79d
--- /dev/null
+++ b/Benchmarks/Program.cs
@@ -0,0 +1,84 @@
+ο»Ώusing BenchmarkDotNet.Running;
+using BenchmarkDotNet.Configs;
+using BenchmarkDotNet.Jobs;
+using BenchmarkDotNet.Toolchains.InProcess.Emit;
+using BenchmarkDotNet.Columns;
+using BenchmarkDotNet.Diagnosers;
+using BenchmarkDotNet.Exporters;
+using BenchmarkDotNet.Exporters.Csv;
+using BenchmarkDotNet.Exporters.Json;
+using BenchmarkDotNet.Loggers;
+using Benchmarks.UseCases;
+
+// Create a custom configuration for our benchmarks
+var config = ManualConfig.Create(DefaultConfig.Instance)
+ .AddJob(Job.Default
+ .WithRuntime(BenchmarkDotNet.Environments.CoreRuntime.Core90)
+ .WithToolchain(InProcessEmitToolchain.Instance)
+ .WithMinIterationCount(3)
+ .WithMaxIterationCount(10)
+ .WithWarmupCount(2))
+ .AddColumn(StatisticColumn.Mean)
+ .AddColumn(StatisticColumn.Error)
+ .AddColumn(StatisticColumn.StdDev)
+ .AddColumn(StatisticColumn.Min)
+ .AddColumn(StatisticColumn.Max)
+ .AddColumn(StatisticColumn.P90)
+ .AddColumn(StatisticColumn.P95)
+ .AddColumn(BaselineColumn.Default)
+ .AddColumn(RankColumn.Arabic)
+ .AddDiagnoser(MemoryDiagnoser.Default)
+ .AddExporter(MarkdownExporter.GitHub)
+ .AddExporter(HtmlExporter.Default)
+ .AddExporter(CsvExporter.Default)
+ .AddExporter(JsonExporter.Default)
+ .AddLogger(ConsoleLogger.Default);
+
+Console.WriteLine("PostgreSQL Distributed Cache Benchmarks");
+Console.WriteLine("========================================");
+Console.WriteLine();
+Console.WriteLine("Available benchmark classes:");
+Console.WriteLine("1. CoreOperationsBenchmark - Basic cache operations (Get, Set, Delete, Refresh)[~10 minutes]");
+Console.WriteLine("2. DataSizeBenchmark - Performance with different payload sizes [~10 minutes]");
+Console.WriteLine("3. ExpirationBenchmark - Different expiration strategies [~10 minutes]");
+Console.WriteLine("4. ConcurrencyBenchmark - Concurrent access patterns [~15 minutes]");
+Console.WriteLine("5. BulkOperationsBenchmark - Bulk operations and high-throughput scenarios [~15 minutes]");
+Console.WriteLine();
+
+// Check if user provided specific benchmark class
+if (args.Length > 0)
+{
+ var benchmarkType = args[0].ToLowerInvariant() switch
+ {
+ "core" or "coreoperations" => typeof(CoreOperationsBenchmark),
+ "datasize" or "size" => typeof(DataSizeBenchmark),
+ "expiration" or "expire" => typeof(ExpirationBenchmark),
+ "concurrency" or "concurrent" => typeof(ConcurrencyBenchmark),
+ "bulk" or "bulkoperations" => typeof(BulkOperationsBenchmark),
+ _ => null
+ };
+
+ if (benchmarkType != null)
+ {
+ Console.WriteLine($"Running {benchmarkType.Name}...");
+ BenchmarkRunner.Run(benchmarkType, config);
+ }
+ else
+ {
+ Console.WriteLine($"Unknown benchmark type: {args[0]}");
+ Console.WriteLine("Use one of: core, datasize, expiration, concurrency, bulk");
+ }
+}
+else
+{
+ // Run all benchmarks
+ Console.WriteLine("Running all benchmarks... This may take a while.");
+    Console.WriteLine("To run specific benchmarks, use: dotnet run -- <benchmark-name>");
+ Console.WriteLine();
+
+    BenchmarkRunner.Run<CoreOperationsBenchmark>(config);
+    BenchmarkRunner.Run<DataSizeBenchmark>(config);
+    BenchmarkRunner.Run<ExpirationBenchmark>(config);
+    BenchmarkRunner.Run<ConcurrencyBenchmark>(config);
+    BenchmarkRunner.Run<BulkOperationsBenchmark>(config);
+}
diff --git a/Benchmarks/README.md b/Benchmarks/README.md
new file mode 100644
index 0000000..2f49e87
--- /dev/null
+++ b/Benchmarks/README.md
@@ -0,0 +1,305 @@
+# PostgreSQL Distributed Cache Benchmarks
+
+This project contains comprehensive benchmarks for the PostgreSQL distributed cache library using BenchmarkDotNet and TestContainers.
+
+## Overview
+
+The benchmark suite evaluates performance across multiple dimensions:
+
+- **Core Operations**: Basic cache operations (Get, Set, Delete, Refresh)
+- **Data Size Impact**: Performance with different payload sizes
+- **Expiration Strategies**: Different expiration configurations
+- **Concurrency**: Performance under concurrent access
+- **Bulk Operations**: High-throughput scenarios and bulk operations
+
+## Prerequisites
+
+- .NET 9.0 SDK
+- Docker (for PostgreSQL TestContainer run)
+- At least 4GB RAM available for Docker containers
+- x64 platform (recommended for accurate benchmarks)
+
+## Quick Start
+
+### Run All Benchmarks
+
+```bash
+dotnet run --configuration Release
+```
+
+### Run Specific Benchmark
+
+```bash
+# Core operations benchmark
+dotnet run --configuration Release -- core
+
+# Data size benchmark
+dotnet run --configuration Release -- datasize
+
+# Expiration benchmark
+dotnet run --configuration Release -- expiration
+
+# Concurrency benchmark
+dotnet run --configuration Release -- concurrency
+
+# Bulk operations benchmark
+dotnet run --configuration Release -- bulk
+```
+
+## Benchmark Descriptions
+
+### 1. CoreOperationsBenchmark
+
+Tests the fundamental cache operations to establish baseline performance.
+
+**Operations Tested:**
+
+- `SetAsync` / `SetSync` - Adding new cache entries
+- `GetAsync_Hit` / `GetSync_Hit` - Retrieving existing entries
+- `GetAsync_Miss` / `GetSync_Miss` - Attempting to retrieve non-existent entries
+- `RefreshAsync` / `RefreshSync` - Updating expiration times
+- `RemoveAsync` / `RemoveSync` - Deleting cache entries
+
+**Key Metrics:**
+
+- Mean execution time per operation
+- Memory allocations
+- Throughput (operations per second)
+
+### 2. DataSizeBenchmark
+
+Evaluates how payload size affects cache performance.
+
+**Payload Sizes:**
+
+- Small: 1 KB
+- Medium: 10 KB
+- Large: 100 KB
+- Extra Large: 1 MB
+
+**Operations Tested:**
+
+- Set operations with different payload sizes
+- Get operations with different payload sizes
+- Both async and sync variants
+
+**Key Insights:**
+
+- Network I/O impact on larger payloads
+- Memory usage patterns
+- PostgreSQL BYTEA column performance
+
+### 3. ExpirationBenchmark
+
+Tests performance impact of different expiration strategies.
+
+**Expiration Types:**
+
+- No explicit expiration (uses default)
+- Sliding expiration
+- Absolute expiration (relative to now)
+- Absolute expiration (fixed time)
+- Both sliding and absolute expiration
+- Short expiration periods
+
+**Operations Tested:**
+
+- Set operations with different expiration configurations
+- Get operations (with expiration logic)
+- Refresh operations (sliding expiration updates)
+
+**Key Insights:**
+
+- Overhead of expiration calculation
+- Database query complexity impact
+- Refresh operation performance
+
+### 4. ConcurrencyBenchmark
+
+Tests cache performance under concurrent access patterns.
+
+**Concurrency Levels:** 2, 4, 8, 16 concurrent tasks
+
+**Scenarios:**
+
+- `ConcurrentSet` - Multiple simultaneous write operations
+- `ConcurrentGet` - Multiple simultaneous read operations
+- `ConcurrentMixedOperations` - Mixed read/write operations
+- `ConcurrentSetSameKey` - Write contention on same key
+- `ConcurrentGetSameKey` - Read amplification on same key
+- `ConcurrentHighContentionScenario` - High contention simulation
+- `ConcurrentBulkOperations` - Each task performs multiple operations
+
+**Key Insights:**
+
+- Database connection pooling effectiveness
+- Lock contention behavior
+- Scalability characteristics
+
+### 5. BulkOperationsBenchmark
+
+Tests high-throughput scenarios and bulk operations.
+
+**Bulk Sizes:** 10, 50, 100, 500 operations
+
+**Scenarios:**
+
+- `BulkSetSequential` vs `BulkSetParallel` - Sequential vs parallel writes
+- `BulkGetSequential` vs `BulkGetParallel` - Sequential vs parallel reads
+- `BulkMixedOperations` - Mixed operation batches
+- `BulkJsonSerialization` - Complex object serialization performance
+- `BulkRefreshOperations` - Batch refresh operations
+- `BulkRemoveOperations` - Batch delete operations
+- `HighThroughputScenario` - Mixed high-throughput simulation
+
+**Key Insights:**
+
+- Parallelization benefits
+- Serialization overhead
+- Database throughput limits
+
+## Understanding Results
+
+### Key Metrics Explained
+
+- **Mean**: Average execution time per operation
+- **Error**: Standard error of the mean
+- **StdDev**: Standard deviation of measurements
+- **Min/Max**: Fastest and slowest recorded times
+- **P90/P95**: 90th and 95th percentile response times
+- **Gen 0/1/2**: Garbage collection counts
+- **Allocated**: Memory allocated per operation
+
+### Performance Baselines
+
+Each benchmark class uses `[Benchmark(Baseline = true)]` on a representative operation. Results show:
+
+- **Ratio**: Performance relative to baseline (lower is better)
+- **Rank**: Performance ranking within the benchmark class
+
+### Interpreting Concurrent Results
+
+For concurrency benchmarks, pay attention to:
+
+- **Scaling efficiency**: How performance changes with increased concurrent tasks
+- **Contention indicators**: Disproportionate slowdown suggests lock contention
+- **Memory pressure**: Increased allocations under concurrency
+
+## TestContainer Setup
+
+The benchmarks use PostgreSQL TestContainers for isolation and reproducibility:
+
+- **Database**: PostgreSQL 16
+- **Schema**: `benchmark_cache`
+- **Table**: `cache_items`
+- **Cleanup**: Automatic cleanup between benchmark iterations
+
+## Configuration Options
+
+The PostgreSQL cache is configured with:
+
+- Default sliding expiration: 20 minutes
+- Expired items deletion interval: 5 minutes
+- Infrastructure creation: Enabled
+- Connection pooling: Enabled via Npgsql
+
+## Best Practices
+
+### Running Benchmarks
+
+1. **Use Release Configuration**: Always run with `--configuration Release`
+2. **Close Other Applications**: Minimize system noise
+3. **Multiple Runs**: Run benchmarks multiple times for consistency
+4. **Stable Environment**: Use the same machine configuration for comparisons
+
+### Interpreting Results
+
+1. **Focus on Ratios**: Compare relative performance rather than absolute times
+2. **Consider Percentiles**: P95 times indicate worst-case performance
+3. **Monitor Memory**: High allocation rates may indicate inefficiencies
+4. **Validate with Load Testing**: Supplement with realistic load testing
+
+## Troubleshooting
+
+### Common Issues
+
+**Docker Not Running**
+
+```
+Error: Docker is not running or not accessible
+Solution: Start Docker Desktop or Docker service
+```
+
+**Port Conflicts**
+
+```
+Error: Port 5432 is already in use
+Solution: Stop other PostgreSQL instances or change port in fixture
+```
+
+**Memory Issues**
+
+```
+Error: Out of memory during bulk operations
+Solution: Reduce bulk sizes or increase available memory
+```
+
+**Slow Benchmarks**
+
+```
+Issue: Benchmarks taking too long
+Solution: Reduce iteration counts or run specific benchmarks
+```
+
+**Setup Method Return Type Requirements**
+
+```
+Issue: How to handle async operations in BenchmarkDotNet setup methods
+Solution:
+- [GlobalSetup] and [GlobalCleanup] methods MUST return void (not Task)
+- [IterationSetup] and [IterationCleanup] methods can return either void or async Task
+- For async operations in GlobalSetup/GlobalCleanup, use .GetAwaiter().GetResult()
+- For IterationSetup/IterationCleanup, prefer async Task when awaiting async operations
+- Benchmark methods can be async and return Task
+```
+
+**Setup Method Best Practices**
+
+```
+Required Patterns:
+- [GlobalSetup] public void GlobalSetup() - MUST be void, use .GetAwaiter().GetResult() for async
+- [GlobalCleanup] public void GlobalCleanup() - MUST be void, use .GetAwaiter().GetResult() for async
+- [IterationSetup] public void IterationSetup() - can be void for fast setup
+- [IterationSetup] public async Task IterationSetup() - can be async Task for async operations
+- [IterationCleanup] public void IterationCleanup() - can be void for fast cleanup
+- [IterationCleanup] public async Task IterationCleanup() - can be async Task for async operations
+```
+
+## Extending Benchmarks
+
+To add new benchmarks:
+
+1. Create a new benchmark class implementing `IAsyncDisposable`
+2. Use `PostgreSqlBenchmarkFixture` for database setup
+3. Add appropriate BenchmarkDotNet attributes
+4. Update `Program.cs` to include the new benchmark
+5. Document the new benchmark in this README
+
+## Output Files
+
+Benchmarks generate several output files:
+
+- `BenchmarkDotNet.Artifacts/results/*.html` - HTML reports
+- `BenchmarkDotNet.Artifacts/results/*.md` - Markdown reports
+- `BenchmarkDotNet.Artifacts/results/*.csv` - CSV data
+- `BenchmarkDotNet.Artifacts/logs/*.log` - Execution logs
+
+## Contributing
+
+When adding new benchmarks:
+
+1. Follow the existing naming conventions
+2. Include appropriate cleanup logic
+3. Add comprehensive documentation
+4. Test with different parameter values
+5. Consider memory and performance implications
diff --git a/Benchmarks/UseCases/BulkOperationsBenchmark.cs b/Benchmarks/UseCases/BulkOperationsBenchmark.cs
new file mode 100644
index 0000000..7dcff55
--- /dev/null
+++ b/Benchmarks/UseCases/BulkOperationsBenchmark.cs
@@ -0,0 +1,348 @@
+using BenchmarkDotNet.Attributes;
+using Benchmarks.Fixtures;
+using Microsoft.Extensions.Caching.Distributed;
+using System.Text;
+using System.Text.Json;
+
+namespace Benchmarks.UseCases;
+
+///
+/// Bulk Operations Benchmark Suite
+///
+/// This comprehensive benchmark class evaluates the performance of PostgreSQL distributed cache
+/// under various bulk operation scenarios and high-throughput workloads. It's designed to help
+/// identify optimal strategies for applications that need to perform large-scale caching operations.
+///
+/// Configuration:
+///
+/// - Bulk Sizes: 10, 50, 100, 500 operations per test
+/// - Runtime: .NET 9.0
+/// - Metrics: Memory usage, execution time (mean, median, std dev, min, max)
+/// - Test Data: Both simple byte arrays and complex JSON objects
+///
+///
+/// Test Scenarios Covered:
+///
+/// - Sequential vs Parallel Operations: Compares performance between sequential and parallel execution patterns
+/// - CRUD Operations at Scale: Tests Set, Get, Refresh, and Remove operations with bulk data
+/// - JSON Serialization Performance: Evaluates overhead of storing/retrieving complex objects
+/// - Mixed Workload Simulation: Tests realistic scenarios with combined operation types
+/// - High-Throughput Scenarios: Stress tests the cache under heavy concurrent load
+///
+///
+///
+///
+[MemoryDiagnoser]
+[RankColumn]
+[MeanColumn, MedianColumn, StdDevColumn, MinColumn, MaxColumn]
+public class BulkOperationsBenchmark : IAsyncDisposable
+{
+ private PostgreSqlBenchmarkFixture _fixture = null!;
+ private IDistributedCache _cache = null!;
+ private readonly byte[] _testData = Encoding.UTF8.GetBytes("This is a test cache value for bulk operations benchmarking purposes.");
+ private readonly DistributedCacheEntryOptions _defaultOptions = new()
+ {
+ SlidingExpiration = TimeSpan.FromMinutes(30)
+ };
+
+ [Params(10, 50, 100, 500)]
+ public int BulkSize { get; set; }
+
+ // Complex object for JSON serialization benchmarks
+ private readonly TestObject _complexObject = new()
+ {
+ Id = 12345,
+ Name = "Test Object for Bulk Operations",
+ Description = "This is a more complex object that will be serialized to JSON and stored in the cache.",
+ CreatedAt = DateTime.UtcNow,
+ IsActive = true,
+ Tags = ["benchmark", "test", "performance", "cache"],
+ Metadata = new()
+ {
+ { "version", "1.0.0" },
+ { "environment", "benchmark" },
+ { "priority", 5 }
+ }
+ };
+
+ [GlobalSetup]
+ public void GlobalSetup()
+ {
+ _fixture = new PostgreSqlBenchmarkFixture();
+ _cache = _fixture.InitializeAsync().GetAwaiter().GetResult();
+
+ // Pre-populate cache with some data for bulk read operations
+ for (int i = 0; i < 1000; i++)
+ {
+ _cache.Set($"bulk_read_key_{i}", _testData, _defaultOptions);
+ }
+ }
+
+ [GlobalCleanup]
+ public void GlobalCleanup()
+ {
+ _fixture.DisposeAsync().GetAwaiter().GetResult();
+ }
+
+ [IterationSetup]
+ public async Task IterationSetup()
+ {
+ // Clean up and re-populate cache for consistent benchmarking
+ await _fixture.CleanupAsync();
+
+ for (int i = 0; i < Math.Min(BulkSize * 2, 500); i++)
+ {
+ _cache.Set($"bulk_read_key_{i}", _testData, _defaultOptions);
+ }
+ }
+
+ [Benchmark(Baseline = true)]
+ public string BulkSetSequential()
+ {
+ var baseKey = $"bulk_set_seq_{Random.Shared.Next(10000)}";
+
+ for (int i = 0; i < BulkSize; i++)
+ {
+ var key = $"{baseKey}_{i}";
+ _cache.Set(key, _testData, _defaultOptions);
+ }
+ return "BulkSetSequential";
+ }
+
+ [Benchmark]
+ public async Task BulkSetParallel()
+ {
+ var baseKey = $"bulk_set_par_{Random.Shared.Next(10000)}";
+
+ // Using Parallel.ForAsync
+ await Parallel.ForAsync(0, BulkSize, async (i, ct) =>
+ {
+ var key = $"{baseKey}_{i}";
+ await _cache.SetAsync(key, _testData, _defaultOptions, ct);
+ });
+
+ // Alternative approach using Task array (previous implementation):
+ // var tasks = new Task[BulkSize];
+ // for (int i = 0; i < BulkSize; i++)
+ // {
+ // var key = $"{baseKey}_{i}";
+ // tasks[i] = _cache.SetAsync(key, _testData, _defaultOptions);
+ // }
+ // await Task.WhenAll(tasks);
+ }
+
+ [Benchmark]
+ public string BulkGetSequential()
+ {
+ for (int i = 0; i < BulkSize; i++)
+ {
+ var keyIndex = i % Math.Min(BulkSize * 2, 500);
+ _ = _cache.Get($"bulk_read_key_{keyIndex}");
+ }
+
+ return nameof(BulkGetSequential);
+ }
+
+ [Benchmark]
+ public async Task<string> BulkGetParallel()
+ {
+ await Parallel.ForAsync(0, BulkSize, async (i, ct) =>
+ {
+ var keyIndex = i % Math.Min(BulkSize * 2, 500);
+ var key = $"bulk_read_key_{keyIndex}";
+ _ = await _cache.GetAsync(key, ct);
+ });
+ return nameof(BulkGetParallel);
+ }
+
+ [Benchmark]
+ public async Task<string> BulkMixedOperationsSequential()
+ {
+ var baseKey = $"bulk_mixed_seq_{Random.Shared.Next(10000)}";
+
+ for (int i = 0; i < BulkSize; i++)
+ {
+ var key = $"{baseKey}_{i}";
+ var operationType = i % 4;
+
+ switch (operationType)
+ {
+ case 0:
+ await _cache.SetAsync(key, _testData, _defaultOptions);
+ break;
+ case 1:
+ await _cache.GetAsync(key);
+ break;
+ case 2:
+ await _cache.RefreshAsync(key);
+ break;
+ case 3:
+ await _cache.RemoveAsync(key);
+ break;
+ }
+ }
+ return nameof(BulkMixedOperationsSequential);
+ }
+
+ [Benchmark]
+ public async Task<string> BulkMixedOperationsParallel()
+ {
+ var baseKey = $"bulk_mixed_par_{Random.Shared.Next(10000)}";
+
+ await Parallel.ForAsync(0, BulkSize, async (i, ct) =>
+ {
+ var key = $"{baseKey}_{i}";
+ var operationType = i % 4;
+
+ await (operationType switch
+ {
+ 0 => _cache.SetAsync(key, _testData, _defaultOptions, ct),
+ 1 => GetAndIgnoreResult(key, ct),
+ 2 => _cache.RefreshAsync(key, ct),
+ 3 => _cache.RemoveAsync(key, ct),
+ _ => Task.CompletedTask
+ });
+ });
+ return nameof(BulkMixedOperationsParallel);
+ }
+
+ [Benchmark]
+ public async Task<string> BulkJsonSerializationSet()
+ {
+ var baseKey = $"bulk_json_set_{Random.Shared.Next(10000)}";
+
+ await Parallel.ForAsync(0, BulkSize, async (i, ct) =>
+ {
+ var key = $"{baseKey}_{i}";
+ var modifiedObject = new TestObject
+ {
+ Id = _complexObject.Id + i,
+ Name = $"{_complexObject.Name} #{i}",
+ Description = _complexObject.Description,
+ CreatedAt = _complexObject.CreatedAt.AddMinutes(i),
+ IsActive = _complexObject.IsActive,
+ Tags = _complexObject.Tags,
+ Metadata = new Dictionary<string, object>(_complexObject.Metadata)
+ {
+ { "index", i }
+ }
+ };
+
+ var jsonData = JsonSerializer.SerializeToUtf8Bytes(modifiedObject);
+ await _cache.SetAsync(key, jsonData, _defaultOptions, ct);
+ });
+ return nameof(BulkJsonSerializationSet);
+ }
+
+ [Benchmark]
+ public async Task<string> BulkJsonSerializationGet()
+ {
+ // First, set some JSON data
+ var baseKey = $"bulk_json_get_{Random.Shared.Next(10000)}";
+ var jsonData = JsonSerializer.SerializeToUtf8Bytes(_complexObject);
+
+ for (int i = 0; i < BulkSize; i++)
+ {
+ var key = $"{baseKey}_{i}";
+ await _cache.SetAsync(key, jsonData, _defaultOptions);
+ }
+
+ // Then, retrieve and deserialize them in parallel
+ await Parallel.ForAsync(0, BulkSize, async (i, ct) =>
+ {
+ var key = $"{baseKey}_{i}";
+ _ = await GetAndDeserializeObject(key, ct);
+ });
+ return nameof(BulkJsonSerializationGet);
+ }
+
+ [Benchmark]
+ public async Task<string> BulkRefreshOperations()
+ {
+ await Parallel.ForAsync(0, BulkSize, async (i, ct) =>
+ {
+ var keyIndex = i % Math.Min(BulkSize * 2, 500);
+ var key = $"bulk_read_key_{keyIndex}";
+ await _cache.RefreshAsync(key, ct);
+ });
+ return nameof(BulkRefreshOperations);
+ }
+
+ [Benchmark]
+ public async Task<string> BulkRemoveOperations()
+ {
+ var baseKey = $"bulk_remove_{Random.Shared.Next(10000)}";
+
+ // First, set the keys
+ for (int i = 0; i < BulkSize; i++)
+ {
+ var key = $"{baseKey}_{i}";
+ await _cache.SetAsync(key, _testData, _defaultOptions);
+ }
+
+ // Then, remove them in parallel
+ await Parallel.ForAsync(0, BulkSize, async (i, ct) =>
+ {
+ var key = $"{baseKey}_{i}";
+ await _cache.RemoveAsync(key, ct);
+ });
+ return nameof(BulkRemoveOperations);
+ }
+
+ [Benchmark]
+ public async Task<string> HighThroughputScenario()
+ {
+ var totalOperations = BulkSize * 4; // 4 operations per bulk size
+ var baseKey = $"high_throughput_{Random.Shared.Next(10000)}";
+
+ await Parallel.ForAsync(0, totalOperations, async (i, ct) =>
+ {
+ var key = $"{baseKey}_{i}";
+ var operationType = i % 8;
+
+ await (operationType switch
+ {
+ 0 or 1 or 2 => _cache.SetAsync(key, _testData, _defaultOptions, ct), // 3/8 sets
+ 3 or 4 or 5 => GetAndIgnoreResult(key, ct), // 3/8 gets
+ 6 => _cache.RefreshAsync(key, ct), // 1/8 refreshes
+ 7 => _cache.RemoveAsync(key, ct), // 1/8 removes
+ _ => Task.CompletedTask
+ });
+ });
+ return nameof(HighThroughputScenario);
+ }
+
+ private async Task<string> GetAndIgnoreResult(string key, CancellationToken cancellationToken = default)
+ {
+ await _cache.GetAsync(key, cancellationToken);
+ return key;
+ }
+
+ private async Task<TestObject?> GetAndDeserializeObject(string key, CancellationToken cancellationToken = default)
+ {
+ var data = await _cache.GetAsync(key, cancellationToken);
+ if (data == null)
+ return null;
+
+ return JsonSerializer.Deserialize<TestObject>(data);
+ }
+
+ public async ValueTask DisposeAsync()
+ {
+ if (_fixture != null)
+ {
+ await _fixture.DisposeAsync();
+ }
+ }
+
+ public class TestObject
+ {
+ public int Id { get; set; }
+ public string Name { get; set; } = string.Empty;
+ public string Description { get; set; } = string.Empty;
+ public DateTime CreatedAt { get; set; }
+ public bool IsActive { get; set; }
+ public string[] Tags { get; set; } = Array.Empty<string>();
+ public Dictionary<string, object> Metadata { get; set; } = new();
+ }
+}
\ No newline at end of file
diff --git a/Benchmarks/UseCases/ConcurrencyBenchmark.cs b/Benchmarks/UseCases/ConcurrencyBenchmark.cs
new file mode 100644
index 0000000..799b966
--- /dev/null
+++ b/Benchmarks/UseCases/ConcurrencyBenchmark.cs
@@ -0,0 +1,256 @@
+using BenchmarkDotNet.Attributes;
+using Microsoft.Extensions.Caching.Distributed;
+using System.Text;
+using System.Collections.Concurrent;
+using System.Linq;
+using Benchmarks.Fixtures;
+
+namespace Benchmarks.UseCases;
+
+///
+/// Benchmarks for cache operations under concurrent access patterns
+///
+[MemoryDiagnoser]
+[RankColumn]
+[MeanColumn, MedianColumn, StdDevColumn, MinColumn, MaxColumn]
+public class ConcurrencyBenchmark : IAsyncDisposable
+{
+ private PostgreSqlBenchmarkFixture _fixture = null!;
+ private IDistributedCache _cache = null!;
+ private readonly byte[] _testData = Encoding.UTF8.GetBytes("This is a test cache value for concurrency benchmarking purposes.");
+ private readonly DistributedCacheEntryOptions _defaultOptions = new()
+ {
+ SlidingExpiration = TimeSpan.FromMinutes(30)
+ };
+
+ [Params(2, 4, 8, 16)]
+ public int ConcurrentTasks { get; set; }
+
+ [GlobalSetup]
+ public void GlobalSetup()
+ {
+ _fixture = new PostgreSqlBenchmarkFixture();
+ _cache = _fixture.InitializeAsync().GetAwaiter().GetResult();
+
+ // Pre-populate cache with some data for read operations
+ for (int i = 0; i < 1000; i++)
+ {
+ _cache.Set($"concurrent_read_key_{i}", _testData, _defaultOptions);
+ }
+ }
+
+ [GlobalCleanup]
+ public void GlobalCleanup()
+ {
+ _fixture.DisposeAsync().GetAwaiter().GetResult();
+ }
+
+ [IterationSetup]
+ public void IterationSetup()
+ {
+ // Clean up and re-populate cache for consistent benchmarking
+ _fixture.CleanupAsync().GetAwaiter().GetResult();
+
+ for (int i = 0; i < 100; i++)
+ {
+ _cache.Set($"concurrent_read_key_{i}", _testData, _defaultOptions);
+ }
+ }
+
+ [Benchmark(Baseline = true)]
+ public async Task<string> ConcurrentSet()
+ {
+ var baseKey = $"concurrent_set_{Random.Shared.Next(10000)}";
+
+ await Parallel.ForEachAsync(
+ Enumerable.Range(0, ConcurrentTasks),
+ new ParallelOptions { MaxDegreeOfParallelism = ConcurrentTasks },
+ async (i, cancellationToken) =>
+ {
+ var key = $"{baseKey}_{i}";
+ await _cache.SetAsync(key, _testData, _defaultOptions, cancellationToken);
+ });
+
+ return nameof(ConcurrentSet);
+ }
+
+ [Benchmark]
+ public async Task<string> ConcurrentGet()
+ {
+ var tasks = new Task[ConcurrentTasks];
+
+ for (int i = 0; i < ConcurrentTasks; i++)
+ {
+ var keyIndex = Random.Shared.Next(100);
+ var key = $"concurrent_read_key_{keyIndex}";
+ tasks[i] = _cache.GetAsync(key);
+ }
+
+ await Task.WhenAll(tasks);
+ return nameof(ConcurrentGet);
+ }
+
+ [Benchmark]
+ public async Task<string> ConcurrentMixedOperations()
+ {
+ var tasks = new Task[ConcurrentTasks];
+ var baseKey = $"concurrent_mixed_{Random.Shared.Next(10000)}";
+
+ for (int i = 0; i < ConcurrentTasks; i++)
+ {
+ var operationType = i % 4;
+ var key = $"{baseKey}_{i}";
+
+ tasks[i] = operationType switch
+ {
+ 0 => _cache.SetAsync(key, _testData, _defaultOptions),
+ 1 => GetAndIgnoreResult(key),
+ 2 => _cache.RefreshAsync(key),
+ 3 => _cache.RemoveAsync(key),
+ _ => Task.CompletedTask
+ };
+ }
+
+ await Task.WhenAll(tasks);
+ return nameof(ConcurrentMixedOperations);
+ }
+
+ [Benchmark]
+ public async Task<string> ConcurrentSetSameKey()
+ {
+ var tasks = new Task[ConcurrentTasks];
+ var sharedKey = $"shared_key_{Random.Shared.Next(1000)}";
+
+ for (int i = 0; i < ConcurrentTasks; i++)
+ {
+ var uniqueData = Encoding.UTF8.GetBytes($"Data from task {i}");
+ tasks[i] = _cache.SetAsync(sharedKey, uniqueData, _defaultOptions);
+ }
+
+ await Task.WhenAll(tasks);
+ return nameof(ConcurrentSetSameKey);
+ }
+
+ [Benchmark]
+ public async Task<string> ConcurrentGetSameKey()
+ {
+ var tasks = new Task[ConcurrentTasks];
+ var sharedKey = "concurrent_read_key_0"; // Use pre-populated key
+
+ for (int i = 0; i < ConcurrentTasks; i++)
+ {
+ tasks[i] = _cache.GetAsync(sharedKey);
+ }
+
+ await Task.WhenAll(tasks);
+ return nameof(ConcurrentGetSameKey);
+ }
+
+ [Benchmark]
+ public async Task<string> ConcurrentRefresh()
+ {
+ var tasks = new Task[ConcurrentTasks];
+
+ for (int i = 0; i < ConcurrentTasks; i++)
+ {
+ var keyIndex = Random.Shared.Next(100);
+ var key = $"concurrent_read_key_{keyIndex}";
+ tasks[i] = _cache.RefreshAsync(key);
+ }
+
+ await Task.WhenAll(tasks);
+ return nameof(ConcurrentRefresh);
+ }
+
+ [Benchmark]
+ public async Task<string> ConcurrentRemove()
+ {
+ var tasks = new Task[ConcurrentTasks];
+ var baseKey = $"concurrent_remove_{Random.Shared.Next(10000)}";
+
+ // First, set the keys
+ for (int i = 0; i < ConcurrentTasks; i++)
+ {
+ var key = $"{baseKey}_{i}";
+ await _cache.SetAsync(key, _testData, _defaultOptions);
+ }
+
+ // Then, remove them concurrently
+ for (int i = 0; i < ConcurrentTasks; i++)
+ {
+ var key = $"{baseKey}_{i}";
+ tasks[i] = _cache.RemoveAsync(key);
+ }
+
+ await Task.WhenAll(tasks);
+ return nameof(ConcurrentRemove);
+ }
+
+ [Benchmark]
+ public async Task<string> ConcurrentHighContentionScenario()
+ {
+ var tasks = new Task[ConcurrentTasks];
+ var sharedKeys = new[] { "shared_key_1", "shared_key_2", "shared_key_3" };
+
+ for (int i = 0; i < ConcurrentTasks; i++)
+ {
+ var taskIndex = i;
+ tasks[i] = Task.Run(async () =>
+ {
+ var key = sharedKeys[taskIndex % sharedKeys.Length];
+ var operations = new Func<Task>[]
+ {
+ () => _cache.SetAsync(key, _testData, _defaultOptions),
+ () => GetAndIgnoreResult(key),
+ () => _cache.RefreshAsync(key),
+ () => _cache.RemoveAsync(key)
+ };
+
+ var operation = operations[taskIndex % operations.Length];
+ await operation();
+ });
+ }
+
+ await Task.WhenAll(tasks);
+ return nameof(ConcurrentHighContentionScenario);
+ }
+
+ [Benchmark]
+ public async Task<string> ConcurrentBulkOperations()
+ {
+ var tasks = new Task[ConcurrentTasks];
+
+ for (int i = 0; i < ConcurrentTasks; i++)
+ {
+ var taskIndex = i;
+ tasks[i] = Task.Run(async () =>
+ {
+ var batchSize = 10;
+ var baseBatchKey = $"batch_{taskIndex}_{Random.Shared.Next(1000)}";
+
+ // Each task performs a batch of operations
+ for (int j = 0; j < batchSize; j++)
+ {
+ var key = $"{baseBatchKey}_{j}";
+ await _cache.SetAsync(key, _testData, _defaultOptions);
+ }
+ });
+ }
+
+ await Task.WhenAll(tasks);
+ return nameof(ConcurrentBulkOperations);
+ }
+
+ private async Task GetAndIgnoreResult(string key)
+ {
+ await _cache.GetAsync(key);
+ }
+
+ public async ValueTask DisposeAsync()
+ {
+ if (_fixture != null)
+ {
+ await _fixture.DisposeAsync();
+ }
+ }
+}
\ No newline at end of file
diff --git a/Benchmarks/UseCases/CoreOperationsBenchmark.cs b/Benchmarks/UseCases/CoreOperationsBenchmark.cs
new file mode 100644
index 0000000..a356aff
--- /dev/null
+++ b/Benchmarks/UseCases/CoreOperationsBenchmark.cs
@@ -0,0 +1,162 @@
+using BenchmarkDotNet.Attributes;
+using Benchmarks.Fixtures;
+using Microsoft.Extensions.Caching.Distributed;
+using System.Text;
+
+namespace Benchmarks.UseCases;
+
+///
+/// Core Operations Benchmark Suite
+///
+/// Benchmarks for core cache operations
+/// Test Data: Uses a UTF-8 encoded string of 53 bytes as test payload
+///
+/// Cache Setup
+///
+/// - Pre-populates 1000 keys during global setup
+/// - Refreshes with 100 keys before each iteration
+/// - Uses 30-minute sliding expiration
+///
+/// Performance Scenarios
+///
+/// - Cache Hits vs Cache Misses - Important distinction for real-world performance analysis
+/// - Async vs Sync operations - Comparing different execution models
+/// - Random key access - Simulates realistic usage patterns
+///
+///
+[MemoryDiagnoser]
+[RankColumn]
+[MeanColumn, MedianColumn, StdDevColumn, MinColumn, MaxColumn]
+public class CoreOperationsBenchmark : IAsyncDisposable
+{
+ private PostgreSqlBenchmarkFixture _fixture = null!;
+ private IDistributedCache _cache = null!;
+ private readonly byte[] _testData = Encoding.UTF8.GetBytes("This is a test cache value for benchmarking purposes.");
+ private readonly DistributedCacheEntryOptions _defaultOptions = new()
+ {
+ SlidingExpiration = TimeSpan.FromMinutes(30)
+ };
+
+ [GlobalSetup]
+ public void GlobalSetup()
+ {
+ _fixture = new PostgreSqlBenchmarkFixture();
+ _cache = _fixture.InitializeAsync().GetAwaiter().GetResult();
+
+ // Pre-populate some keys for Get and Refresh benchmarks
+ for (int i = 0; i < 1000; i++)
+ {
+ _cache.Set($"benchmark_key_{i}", _testData, _defaultOptions);
+ }
+ }
+
+ [GlobalCleanup]
+ public void GlobalCleanup()
+ {
+ _fixture.DisposeAsync().GetAwaiter().GetResult();
+ }
+
+ [IterationSetup]
+ public void IterationSetup()
+ {
+ // Clean up any keys that might have been created during the benchmark
+ _fixture.CleanupAsync().GetAwaiter().GetResult();
+
+ // Re-populate keys for Get and Refresh benchmarks
+ for (int i = 0; i < 100; i++)
+ {
+ _cache.Set($"benchmark_key_{i}", _testData, _defaultOptions);
+ }
+ }
+
+ [Benchmark(Baseline = true)]
+ public async Task<string> SetAsync()
+ {
+ var key = $"set_key_{Random.Shared.Next(10000)}";
+ await _cache.SetAsync(key, _testData, _defaultOptions);
+ return key;
+ }
+
+ [Benchmark]
+ public async Task<byte[]?> GetAsync_Hit()
+ {
+ var keyIndex = Random.Shared.Next(100);
+ var key = $"benchmark_key_{keyIndex}";
+ return await _cache.GetAsync(key);
+ }
+
+ [Benchmark]
+ public async Task<string> GetAsync_Miss()
+ {
+ var key = $"missing_key_{Random.Shared.Next(10000)}";
+ var result = await _cache.GetAsync(key);
+ return key;
+ }
+
+ [Benchmark]
+ public async Task<string> RefreshAsync()
+ {
+ var keyIndex = Random.Shared.Next(100);
+ var key = $"benchmark_key_{keyIndex}";
+ await _cache.RefreshAsync(key);
+ return key;
+ }
+
+ [Benchmark]
+ public async Task<string> RemoveAsync()
+ {
+ var keyIndex = Random.Shared.Next(100);
+ var key = $"benchmark_key_{keyIndex}";
+ await _cache.RemoveAsync(key);
+ return key;
+ }
+
+ [Benchmark]
+ public string SetSync()
+ {
+ string key = $"set_sync_key_{Random.Shared.Next(10000)}";
+ _cache.Set(key, _testData, _defaultOptions);
+ return key;
+ }
+
+ [Benchmark]
+ public byte[]? GetSync_Hit()
+ {
+ var keyIndex = Random.Shared.Next(100);
+ var key = $"benchmark_key_{keyIndex}";
+ return _cache.Get(key);
+ }
+
+ [Benchmark]
+ public byte[]? GetSync_Miss()
+ {
+ var key = $"missing_sync_key_{Random.Shared.Next(10000)}";
+ return _cache.Get(key);
+ }
+
+ [Benchmark]
+ public string RefreshSync()
+ {
+ var keyIndex = Random.Shared.Next(100);
+ var key = $"benchmark_key_{keyIndex}";
+ _cache.Refresh(key);
+ return key;
+ }
+
+ [Benchmark]
+ public string RemoveSync()
+ {
+ var keyIndex = Random.Shared.Next(100);
+ var key = $"benchmark_key_{keyIndex}";
+ _cache.Remove(key);
+ return key;
+ }
+
+ public async ValueTask DisposeAsync()
+ {
+ if (_fixture != null)
+ {
+ await _fixture.DisposeAsync();
+ }
+ }
+}
\ No newline at end of file
diff --git a/Benchmarks/UseCases/DataSizeBenchmark.cs b/Benchmarks/UseCases/DataSizeBenchmark.cs
new file mode 100644
index 0000000..66f98b3
--- /dev/null
+++ b/Benchmarks/UseCases/DataSizeBenchmark.cs
@@ -0,0 +1,207 @@
+using BenchmarkDotNet.Attributes;
+using Benchmarks.Fixtures;
+using Microsoft.Extensions.Caching.Distributed;
+using System.Text;
+
+namespace Benchmarks.UseCases;
+
+///
+/// Benchmarks for PostgreSQL cache operations across varying data payload sizes.
+/// Tests both synchronous and asynchronous Set/Get operations with 1KB, 10KB, 100KB, and 1MB payloads
+///
+[MemoryDiagnoser]
+[RankColumn]
+[MeanColumn, MedianColumn, StdDevColumn, MinColumn, MaxColumn]
+public class DataSizeBenchmark : IAsyncDisposable
+{
+ private PostgreSqlBenchmarkFixture _fixture = null!;
+ private IDistributedCache _cache = null!;
+
+ // Different payload sizes to test
+ private byte[] _smallData = null!; // 1 KB
+ private byte[] _mediumData = null!; // 10 KB
+ private byte[] _largeData = null!; // 100 KB
+ private byte[] _extraLargeData = null!; // 1 MB
+
+ private readonly DistributedCacheEntryOptions _defaultOptions = new()
+ {
+ SlidingExpiration = TimeSpan.FromMinutes(30)
+ };
+
+ [GlobalSetup]
+ public void GlobalSetup()
+ {
+ _fixture = new PostgreSqlBenchmarkFixture();
+ _cache = _fixture.InitializeAsync().GetAwaiter().GetResult();
+
+ // Create test data of different sizes
+ _smallData = CreateTestData(1024); // 1 KB
+ _mediumData = CreateTestData(10240); // 10 KB
+ _largeData = CreateTestData(102400); // 100 KB
+ _extraLargeData = CreateTestData(1048576); // 1 MB
+
+ // Pre-populate cache with different sized data
+ _cache.Set("small_data", _smallData, _defaultOptions);
+ _cache.Set("medium_data", _mediumData, _defaultOptions);
+ _cache.Set("large_data", _largeData, _defaultOptions);
+ _cache.Set("extra_large_data", _extraLargeData, _defaultOptions);
+ }
+
+ [GlobalCleanup]
+ public void GlobalCleanup()
+ {
+ _fixture.DisposeAsync().GetAwaiter().GetResult();
+ }
+
+ [IterationSetup]
+ public async Task IterationSetup()
+ {
+ // Re-populate cache with test data
+ await _cache.SetAsync("small_data", _smallData, _defaultOptions);
+ await _cache.SetAsync("medium_data", _mediumData, _defaultOptions);
+ await _cache.SetAsync("large_data", _largeData, _defaultOptions);
+ await _cache.SetAsync("extra_large_data", _extraLargeData, _defaultOptions);
+ }
+
+ [Benchmark(Baseline = true)]
+ public async Task<string> SetAsync_Small_1KB()
+ {
+ var key = $"small_set_{Random.Shared.Next(10000)}";
+ await _cache.SetAsync(key, _smallData, _defaultOptions);
+ return nameof(SetAsync_Small_1KB);
+ }
+
+ [Benchmark]
+ public async Task<string> SetAsync_Medium_10KB()
+ {
+ var key = $"medium_set_{Random.Shared.Next(10000)}";
+ await _cache.SetAsync(key, _mediumData, _defaultOptions);
+ return nameof(SetAsync_Medium_10KB);
+ }
+
+ [Benchmark]
+ public async Task<string> SetAsync_Large_100KB()
+ {
+ var key = $"large_set_{Random.Shared.Next(10000)}";
+ await _cache.SetAsync(key, _largeData, _defaultOptions);
+ return nameof(SetAsync_Large_100KB);
+ }
+
+ [Benchmark]
+ public async Task<string> SetAsync_ExtraLarge_1MB()
+ {
+ var key = $"extra_large_set_{Random.Shared.Next(10000)}";
+ await _cache.SetAsync(key, _extraLargeData, _defaultOptions);
+ return nameof(SetAsync_ExtraLarge_1MB);
+ }
+
+ [Benchmark]
+ public async Task<byte[]?> GetAsync_Small_1KB()
+ {
+ return await _cache.GetAsync("small_data");
+ }
+
+ [Benchmark]
+ public async Task<byte[]?> GetAsync_Medium_10KB()
+ {
+ return await _cache.GetAsync("medium_data");
+ }
+
+ [Benchmark]
+ public async Task<byte[]?> GetAsync_Large_100KB()
+ {
+ return await _cache.GetAsync("large_data");
+ }
+
+ [Benchmark]
+ public async Task<byte[]?> GetAsync_ExtraLarge_1MB()
+ {
+ return await _cache.GetAsync("extra_large_data");
+ }
+
+ [Benchmark]
+ public string SetSync_Small_1KB()
+ {
+ var key = $"small_sync_set_{Random.Shared.Next(10000)}";
+ _cache.Set(key, _smallData, _defaultOptions);
+ return nameof(SetSync_Small_1KB);
+
+ }
+
+ [Benchmark]
+ public string SetSync_Medium_10KB()
+ {
+ var key = $"medium_sync_set_{Random.Shared.Next(10000)}";
+ _cache.Set(key, _mediumData, _defaultOptions);
+ return nameof(SetSync_Medium_10KB);
+ }
+
+ [Benchmark]
+ public string SetSync_Large_100KB()
+ {
+ var key = $"large_sync_set_{Random.Shared.Next(10000)}";
+ _cache.Set(key, _largeData, _defaultOptions);
+ return nameof(SetSync_Large_100KB);
+ }
+
+ [Benchmark]
+ public string SetSync_ExtraLarge_1MB()
+ {
+ var key = $"extra_large_sync_set_{Random.Shared.Next(10000)}";
+ _cache.Set(key, _extraLargeData, _defaultOptions);
+ return nameof(SetSync_ExtraLarge_1MB);
+ }
+
+ [Benchmark]
+ public string GetSync_Small_1KB()
+ {
+ var result = _cache.Get("small_data");
+ return nameof(GetSync_Small_1KB); // Return a value to satisfy the compiler
+ }
+
+ [Benchmark]
+ public string GetSync_Medium_10KB()
+ {
+ var result = _cache.Get("medium_data");
+ return nameof(GetSync_Medium_10KB); // Return a value to satisfy the compiler
+ }
+
+ [Benchmark]
+ public string GetSync_Large_100KB()
+ {
+ var result = _cache.Get("large_data");
+ return nameof(GetSync_Large_100KB); // Return a value to satisfy the compiler
+ }
+
+ [Benchmark]
+ public string GetSync_ExtraLarge_1MB()
+ {
+ var result = _cache.Get("extra_large_data");
+ return nameof(GetSync_ExtraLarge_1MB); // Return a value to satisfy the compiler
+ }
+
+ ///
+ /// Creates test data of specified size with some variation to avoid compression
+ ///
+ private static byte[] CreateTestData(int sizeInBytes)
+ {
+ var data = new byte[sizeInBytes];
+ var random = new Random(42); // Fixed seed for reproducibility
+
+ // Fill with random data to avoid compression
+ for (int i = 0; i < sizeInBytes; i++)
+ {
+ data[i] = (byte)(random.Next(256));
+ }
+
+ return data;
+ }
+
+ public async ValueTask DisposeAsync()
+ {
+ if (_fixture != null)
+ {
+ await _fixture.DisposeAsync();
+ }
+ }
+}
\ No newline at end of file
diff --git a/Benchmarks/UseCases/ExpirationBenchmark.cs b/Benchmarks/UseCases/ExpirationBenchmark.cs
new file mode 100644
index 0000000..4b2e161
--- /dev/null
+++ b/Benchmarks/UseCases/ExpirationBenchmark.cs
@@ -0,0 +1,250 @@
+using BenchmarkDotNet.Attributes;
+using Microsoft.Extensions.Caching.Distributed;
+using System.Text;
+using Benchmarks.Fixtures;
+
+namespace Benchmarks.UseCases;
+
+///
+/// Benchmarks for measuring the performance impact of different cache expiration strategies.
+/// Tests Set, Get, and Refresh operations with various expiration configurations including:
+///
+/// - No expiration (default sliding expiration)
+/// - Sliding expiration (30 minutes)
+/// - Absolute expiration (relative to now)
+/// - Absolute expiration (fixed time)
+/// - Combined sliding and absolute expiration
+/// - Short-term expiration (5 minutes)
+///
+/// Measures both async and sync operation performance.
+///
+[MemoryDiagnoser]
+[RankColumn]
+[MeanColumn, MedianColumn, StdDevColumn, MinColumn, MaxColumn]
+public class ExpirationBenchmark : IAsyncDisposable
+{
+ private PostgreSqlBenchmarkFixture _fixture = null!;
+ private IDistributedCache _cache = null!;
+ private readonly byte[] _testData = Encoding.UTF8.GetBytes("This is a test cache value for expiration benchmarking purposes.");
+
+ // Different expiration option configurations
+ private readonly DistributedCacheEntryOptions _noExpirationOptions = new()
+ {
+ // No expiration set - uses default sliding expiration
+ };
+
+ private readonly DistributedCacheEntryOptions _slidingExpirationOptions = new()
+ {
+ SlidingExpiration = TimeSpan.FromMinutes(30)
+ };
+
+ private readonly DistributedCacheEntryOptions _absoluteExpirationOptions = new()
+ {
+ AbsoluteExpirationRelativeToNow = TimeSpan.FromMinutes(60)
+ };
+
+ private readonly DistributedCacheEntryOptions _absoluteExpirationFixedOptions = new()
+ {
+ AbsoluteExpiration = DateTimeOffset.UtcNow.AddMinutes(60)
+ };
+
+ private readonly DistributedCacheEntryOptions _bothExpirationOptions = new()
+ {
+ SlidingExpiration = TimeSpan.FromMinutes(30),
+ AbsoluteExpirationRelativeToNow = TimeSpan.FromMinutes(60)
+ };
+
+ private readonly DistributedCacheEntryOptions _shortExpirationOptions = new()
+ {
+ SlidingExpiration = TimeSpan.FromMinutes(5)
+ };
+
+ [GlobalSetup]
+ public void GlobalSetup()
+ {
+ _fixture = new PostgreSqlBenchmarkFixture();
+ _cache = _fixture.InitializeAsync().GetAwaiter().GetResult();
+
+ // Pre-populate cache with different expiration strategies
+ _cache.Set("no_expiration_key", _testData, _noExpirationOptions);
+ _cache.Set("sliding_expiration_key", _testData, _slidingExpirationOptions);
+ _cache.Set("absolute_expiration_key", _testData, _absoluteExpirationOptions);
+ _cache.Set("both_expiration_key", _testData, _bothExpirationOptions);
+ _cache.Set("short_expiration_key", _testData, _shortExpirationOptions);
+ }
+
+ [GlobalCleanup]
+ public void GlobalCleanup()
+ {
+ _fixture.DisposeAsync().GetAwaiter().GetResult();
+ }
+
+ [IterationSetup]
+ public async Task IterationSetup()
+ {
+ // Clean up and re-populate cache
+ await _fixture.CleanupAsync();
+
+ _cache.Set("no_expiration_key", _testData, _noExpirationOptions);
+ _cache.Set("sliding_expiration_key", _testData, _slidingExpirationOptions);
+ _cache.Set("absolute_expiration_key", _testData, _absoluteExpirationOptions);
+ _cache.Set("both_expiration_key", _testData, _bothExpirationOptions);
+ _cache.Set("short_expiration_key", _testData, _shortExpirationOptions);
+ }
+
+ [Benchmark(Baseline = true)]
+ public async Task SetAsync_NoExpiration()
+ {
+ var key = $"no_exp_set_{Random.Shared.Next(10000)}";
+ await _cache.SetAsync(key, _testData, _noExpirationOptions);
+ }
+
+ [Benchmark]
+ public async Task<string> SetAsync_SlidingExpiration()
+ {
+ var key = $"sliding_exp_set_{Random.Shared.Next(10000)}";
+ await _cache.SetAsync(key, _testData, _slidingExpirationOptions);
+ return nameof(SetAsync_SlidingExpiration);
+ }
+
+ [Benchmark]
+ public async Task SetAsync_AbsoluteExpiration()
+ {
+ var key = $"absolute_exp_set_{Random.Shared.Next(10000)}";
+ await _cache.SetAsync(key, _testData, _absoluteExpirationOptions);
+ }
+
+ [Benchmark]
+ public async Task SetAsync_AbsoluteExpirationFixed()
+ {
+ var key = $"absolute_fixed_exp_set_{Random.Shared.Next(10000)}";
+ // Create new options with fresh absolute expiration time
+ var freshOptions = new DistributedCacheEntryOptions
+ {
+ AbsoluteExpiration = DateTimeOffset.UtcNow.AddMinutes(60)
+ };
+ await _cache.SetAsync(key, _testData, freshOptions);
+ }
+
+ [Benchmark]
+ public async Task SetAsync_BothExpirations()
+ {
+ var key = $"both_exp_set_{Random.Shared.Next(10000)}";
+ await _cache.SetAsync(key, _testData, _bothExpirationOptions);
+ }
+
+ [Benchmark]
+ public async Task SetAsync_ShortExpiration()
+ {
+ var key = $"short_exp_set_{Random.Shared.Next(10000)}";
+ await _cache.SetAsync(key, _testData, _shortExpirationOptions);
+ }
+
+ [Benchmark]
+ public async Task GetAsync_NoExpiration()
+ {
+ var result = await _cache.GetAsync("no_expiration_key");
+ }
+
+ [Benchmark]
+ public async Task GetAsync_SlidingExpiration()
+ {
+ var result = await _cache.GetAsync("sliding_expiration_key");
+ }
+
+ [Benchmark]
+ public async Task GetAsync_AbsoluteExpiration()
+ {
+ var result = await _cache.GetAsync("absolute_expiration_key");
+ }
+
+ [Benchmark]
+ public async Task GetAsync_BothExpirations()
+ {
+ var result = await _cache.GetAsync("both_expiration_key");
+ }
+
+ [Benchmark]
+ public async Task GetAsync_ShortExpiration()
+ {
+ var result = await _cache.GetAsync("short_expiration_key");
+ }
+
+ [Benchmark]
+ public async Task RefreshAsync_SlidingExpiration()
+ {
+ await _cache.RefreshAsync("sliding_expiration_key");
+ }
+
+ [Benchmark]
+ public async Task RefreshAsync_BothExpirations()
+ {
+ await _cache.RefreshAsync("both_expiration_key");
+ }
+
+ [Benchmark]
+ public async Task RefreshAsync_ShortExpiration()
+ {
+ await _cache.RefreshAsync("short_expiration_key");
+ }
+
+ [Benchmark]
+ public void SetSync_NoExpiration()
+ {
+ var key = $"no_exp_sync_set_{Random.Shared.Next(10000)}";
+ _cache.Set(key, _testData, _noExpirationOptions);
+ }
+
+ [Benchmark]
+ public void SetSync_SlidingExpiration()
+ {
+ var key = $"sliding_exp_sync_set_{Random.Shared.Next(10000)}";
+ _cache.Set(key, _testData, _slidingExpirationOptions);
+ }
+
+ [Benchmark]
+ public void SetSync_AbsoluteExpiration()
+ {
+ var key = $"absolute_exp_sync_set_{Random.Shared.Next(10000)}";
+ _cache.Set(key, _testData, _absoluteExpirationOptions);
+ }
+
+ [Benchmark]
+ public void SetSync_BothExpirations()
+ {
+ var key = $"both_exp_sync_set_{Random.Shared.Next(10000)}";
+ _cache.Set(key, _testData, _bothExpirationOptions);
+ }
+
+ [Benchmark]
+ public void GetSync_SlidingExpiration()
+ {
+ var result = _cache.Get("sliding_expiration_key");
+ }
+
+ [Benchmark]
+ public void GetSync_AbsoluteExpiration()
+ {
+ var result = _cache.Get("absolute_expiration_key");
+ }
+
+ [Benchmark]
+ public void RefreshSync_SlidingExpiration()
+ {
+ _cache.Refresh("sliding_expiration_key");
+ }
+
+ [Benchmark]
+ public void RefreshSync_BothExpirations()
+ {
+ _cache.Refresh("both_expiration_key");
+ }
+
+ public async ValueTask DisposeAsync()
+ {
+ if (_fixture != null)
+ {
+ await _fixture.DisposeAsync();
+ }
+ }
+}
\ No newline at end of file
diff --git a/Benchmarks/run-benchmarks.cmd b/Benchmarks/run-benchmarks.cmd
new file mode 100644
index 0000000..3921bdd
--- /dev/null
+++ b/Benchmarks/run-benchmarks.cmd
@@ -0,0 +1,34 @@
+@echo off
+echo PostgreSQL Distributed Cache Benchmarks
+echo ========================================
+echo.
+
+REM Check if Docker is running
+docker info >nul 2>&1
+if %errorlevel% neq 0 (
+ echo ERROR: Docker is not running or not accessible.
+ echo Please start Docker Desktop and try again.
+ pause
+ exit /b 1
+)
+
+echo Docker is running. Starting benchmarks...
+echo.
+
+REM Set configuration to Release for accurate benchmarks
+set CONFIGURATION=Release
+
+REM Check if specific benchmark was requested
+if "%1"=="" (
+ echo Running all benchmarks...
+ dotnet run --configuration %CONFIGURATION%
+) else (
+ echo Running %1 benchmark...
+ dotnet run --configuration %CONFIGURATION% -- %1
+)
+
+echo.
+echo Benchmarks completed!
+echo Results can be found in BenchmarkDotNet.Artifacts\results\
+echo.
+pause
\ No newline at end of file
diff --git a/Benchmarks/run-benchmarks.sh b/Benchmarks/run-benchmarks.sh
new file mode 100644
index 0000000..47e11b5
--- /dev/null
+++ b/Benchmarks/run-benchmarks.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+echo "PostgreSQL Distributed Cache Benchmarks"
+echo "========================================"
+echo
+
+# Check if Docker is running
+if ! docker info >/dev/null 2>&1; then
+ echo "ERROR: Docker is not running or not accessible."
+ echo "Please start Docker service and try again."
+ exit 1
+fi
+
+echo "Docker is running. Starting benchmarks..."
+echo
+
+# Set configuration to Release for accurate benchmarks
+CONFIGURATION=Release
+
+# Check if specific benchmark was requested
+if [ -z "$1" ]; then
+ echo "Running all benchmarks..."
+ dotnet run --configuration $CONFIGURATION
+else
+ echo "Running $1 benchmark..."
+ dotnet run --configuration $CONFIGURATION -- $1
+fi
+
+echo
+echo "Benchmarks completed!"
+echo "Results can be found in BenchmarkDotNet.Artifacts/results/"
+echo
\ No newline at end of file
diff --git a/CachingTest/ReloadableConnectionStringTests.cs b/CachingTest/ReloadableConnectionStringTests.cs
new file mode 100644
index 0000000..59abc73
--- /dev/null
+++ b/CachingTest/ReloadableConnectionStringTests.cs
@@ -0,0 +1,203 @@
+using System;
+using System.Threading;
+using System.Threading.Tasks;
+using Microsoft.Extensions.Configuration;
+using Microsoft.Extensions.Logging;
+using Microsoft.Extensions.Options;
+using Xunit;
+using Moq;
+
+namespace Community.Microsoft.Extensions.Caching.PostgreSql.Tests
+{
+ public class ReloadableConnectionStringTests
+ {
+ [Fact]
+ public void ReloadableConnectionStringProvider_InitializesCorrectly()
+ {
+ // Arrange
+ var configuration = new Mock();
+ var logger = new Mock();
+ var connectionStringKey = "TestConnectionString";
+ var reloadInterval = TimeSpan.FromMinutes(5);
+
+ configuration.Setup(c => c[connectionStringKey]).Returns("Host=localhost;Database=test");
+
+ // Act
+ using var provider = new ReloadableConnectionStringProvider(
+ configuration.Object,
+ logger.Object,
+ connectionStringKey,
+ reloadInterval);
+
+ // Assert
+ var connectionString = provider.GetConnectionString();
+ Assert.Equal("Host=localhost;Database=test", connectionString);
+ }
+
+ [Fact]
+ public void ReloadableConnectionStringProvider_HandlesNullConfiguration()
+ {
+ // Arrange
+ var configuration = new Mock();
+ var logger = new Mock();
+ var connectionStringKey = "TestConnectionString";
+ var reloadInterval = TimeSpan.FromMinutes(5);
+
+ configuration.Setup(c => c[connectionStringKey]).Returns((string)null);
+
+ // Act
+ using var provider = new ReloadableConnectionStringProvider(
+ configuration.Object,
+ logger.Object,
+ connectionStringKey,
+ reloadInterval);
+
+ // Assert
+ var connectionString = provider.GetConnectionString();
+ Assert.Equal(string.Empty, connectionString);
+ }
+
+ [Fact]
+ public void ReloadableConnectionStringProvider_HandlesConfigurationException()
+ {
+ // Arrange
+ var configuration = new Mock();
+ var logger = new Mock();
+ var connectionStringKey = "TestConnectionString";
+ var reloadInterval = TimeSpan.FromMinutes(5);
+
+ configuration.Setup(c => c[connectionStringKey]).Throws(new Exception("Configuration error"));
+
+ // Act
+ using var provider = new ReloadableConnectionStringProvider(
+ configuration.Object,
+ logger.Object,
+ connectionStringKey,
+ reloadInterval);
+
+ // Assert
+ var connectionString = provider.GetConnectionString();
+ Assert.Equal(string.Empty, connectionString);
+ }
+
+ [Fact]
+ public async Task ReloadableConnectionStringProvider_ManualReloadWorks()
+ {
+ // Arrange
+ var configuration = new Mock();
+ var logger = new Mock();
+ var connectionStringKey = "TestConnectionString";
+ var reloadInterval = TimeSpan.FromMinutes(5);
+
+ configuration.Setup(c => c[connectionStringKey]).Returns("Host=localhost;Database=test");
+
+ using var provider = new ReloadableConnectionStringProvider(
+ configuration.Object,
+ logger.Object,
+ connectionStringKey,
+ reloadInterval);
+
+ // Act
+ var connectionString = await provider.ReloadConnectionStringAsync();
+
+ // Assert
+ Assert.Equal("Host=localhost;Database=test", connectionString);
+ }
+
+ [Fact]
+ public void DatabaseOperations_WithReloadableConnectionString_InitializesCorrectly()
+ {
+ // Arrange
+ var configuration = new Mock();
+ var logger = new Mock>();
+ var options = new PostgreSqlCacheOptions
+ {
+ ConnectionStringKey = "TestConnectionString",
+ Configuration = configuration.Object,
+ Logger = logger.Object,
+ EnableConnectionStringReloading = true,
+ ConnectionStringReloadInterval = TimeSpan.FromMinutes(5),
+ SchemaName = "cache",
+ TableName = "cache_items",
+ CreateInfrastructure = false // Don't try to create schema/table for this test
+ };
+
+ configuration.Setup(c => c["TestConnectionString"]).Returns("Host=localhost;Database=test");
+
+ var optionsWrapper = new Mock>();
+ optionsWrapper.Setup(o => o.Value).Returns(options);
+
+ // Act & Assert
+ // This should not throw an exception
+ var databaseOperations = new DatabaseOperations(optionsWrapper.Object, logger.Object);
+ databaseOperations.Dispose();
+ }
+
+ [Fact]
+ public void DatabaseOperations_WithReloadableConnectionString_ValidatesRequiredProperties()
+ {
+ // Arrange
+ var logger = new Mock>();
+ var options = new PostgreSqlCacheOptions
+ {
+ // Missing required properties
+ };
+
+ var optionsWrapper = new Mock>();
+ optionsWrapper.Setup(o => o.Value).Returns(options);
+
+ // Act & Assert
+ Assert.Throws(() => new DatabaseOperations(optionsWrapper.Object, logger.Object));
+ }
+
+ [Fact]
+ public void DatabaseOperations_WithReloadableConnectionString_ValidatesSchemaName()
+ {
+ // Arrange
+ var configuration = new Mock();
+ var logger = new Mock>();
+ var options = new PostgreSqlCacheOptions
+ {
+ ConnectionStringKey = "TestConnectionString",
+ Configuration = configuration.Object,
+ Logger = logger.Object,
+ EnableConnectionStringReloading = true,
+ SchemaName = "", // Empty schema name
+ TableName = "cache_items"
+ };
+
+ configuration.Setup(c => c["TestConnectionString"]).Returns("Host=localhost;Database=test");
+
+ var optionsWrapper = new Mock>();
+ optionsWrapper.Setup(o => o.Value).Returns(options);
+
+ // Act & Assert
+ Assert.Throws(() => new DatabaseOperations(optionsWrapper.Object, logger.Object));
+ }
+
+ [Fact]
+ public void DatabaseOperations_WithReloadableConnectionString_ValidatesTableName()
+ {
+ // Arrange
+ var configuration = new Mock();
+ var logger = new Mock>();
+ var options = new PostgreSqlCacheOptions
+ {
+ ConnectionStringKey = "TestConnectionString",
+ Configuration = configuration.Object,
+ Logger = logger.Object,
+ EnableConnectionStringReloading = true,
+ SchemaName = "cache",
+ TableName = "" // Empty table name
+ };
+
+ configuration.Setup(c => c["TestConnectionString"]).Returns("Host=localhost;Database=test");
+
+ var optionsWrapper = new Mock>();
+ optionsWrapper.Setup(o => o.Value).Returns(options);
+
+ // Act & Assert
+ Assert.Throws(() => new DatabaseOperations(optionsWrapper.Object, logger.Object));
+ }
+ }
+}
\ No newline at end of file
diff --git a/Extensions.Caching.PostgreSql/Community.Microsoft.Extensions.Caching.PostgreSql.csproj b/Extensions.Caching.PostgreSql/Community.Microsoft.Extensions.Caching.PostgreSql.csproj
index 1a15b60..183c729 100644
--- a/Extensions.Caching.PostgreSql/Community.Microsoft.Extensions.Caching.PostgreSql.csproj
+++ b/Extensions.Caching.PostgreSql/Community.Microsoft.Extensions.Caching.PostgreSql.csproj
@@ -4,11 +4,11 @@
Community.Microsoft.Extensions.Caching.PostgreSql
Community.Microsoft.Extensions.Caching.PostgreSql
false
- 5.0.0
+ 5.1.0-next
Ashley Marques
DistributedCache using postgres
- Dependencies updated to latest and now supports .NET 6.0, 8.0, and 9.0
+ Added Azure Key Vault rotation support with reloadable connection strings. Dependencies updated to latest and now supports .NET 6.0, 8.0, and 9.0
https://github.com/leonibr/community-extensions-cache-postgres
https://github.com/leonibr/community-extensions-cache-postgres.git
Github
diff --git a/Extensions.Caching.PostgreSql/DatabaseOperations.cs b/Extensions.Caching.PostgreSql/DatabaseOperations.cs
index 12de2a9..a03ba51 100644
--- a/Extensions.Caching.PostgreSql/DatabaseOperations.cs
+++ b/Extensions.Caching.PostgreSql/DatabaseOperations.cs
@@ -13,20 +13,23 @@
namespace Community.Microsoft.Extensions.Caching.PostgreSql
{
- internal sealed class DatabaseOperations : IDatabaseOperations
+ internal sealed class DatabaseOperations : IDatabaseOperations, IDisposable
{
private readonly ILogger _logger;
private readonly bool _updateOnGetCacheItem;
private readonly bool _readOnlyMode;
+ private readonly ReloadableConnectionStringProvider _connectionStringProvider;
public DatabaseOperations(IOptions options, ILogger logger)
{
var cacheOptions = options.Value;
- if (string.IsNullOrEmpty(cacheOptions.ConnectionString) && cacheOptions.DataSourceFactory is null)
+ if (string.IsNullOrEmpty(cacheOptions.ConnectionString) &&
+ string.IsNullOrEmpty(cacheOptions.ConnectionStringKey) &&
+ cacheOptions.DataSourceFactory is null)
{
throw new ArgumentException(
- $"Either {nameof(PostgreSqlCacheOptions.ConnectionString)} or {nameof(PostgreSqlCacheOptions.DataSourceFactory)} must be set.");
+ $"Either {nameof(PostgreSqlCacheOptions.ConnectionString)}, {nameof(PostgreSqlCacheOptions.ConnectionStringKey)}, or {nameof(PostgreSqlCacheOptions.DataSourceFactory)} must be set.");
}
if (string.IsNullOrEmpty(cacheOptions.SchemaName))
{
@@ -39,9 +42,21 @@ public DatabaseOperations(IOptions options, ILogger cacheOptions.DataSourceFactory.Invoke().CreateConnection()
- : new Func(() => new NpgsqlConnection(cacheOptions.ConnectionString));
+ : new Func(() => new NpgsqlConnection(GetConnectionString(cacheOptions)));
SystemClock = cacheOptions.SystemClock;
@@ -56,6 +71,15 @@ public DatabaseOperations(IOptions options, ILogger ConnectionFactory { get; }
@@ -295,5 +319,10 @@ private void ValidateOptions(TimeSpan? slidingExpiration, DateTimeOffset? absolu
"to be provided.");
}
}
+
+ public void Dispose()
+ {
+ _connectionStringProvider?.Dispose();
+ }
}
}
\ No newline at end of file
diff --git a/Extensions.Caching.PostgreSql/PostGreSqlCacheOptions.cs b/Extensions.Caching.PostgreSql/PostGreSqlCacheOptions.cs
index eca48ce..e82ab1b 100644
--- a/Extensions.Caching.PostgreSql/PostGreSqlCacheOptions.cs
+++ b/Extensions.Caching.PostgreSql/PostGreSqlCacheOptions.cs
@@ -2,6 +2,8 @@
using Microsoft.Extensions.Options;
using System;
using Npgsql;
+using Microsoft.Extensions.Configuration;
+using Microsoft.Extensions.Logging;
namespace Community.Microsoft.Extensions.Caching.PostgreSql
{
@@ -19,6 +21,34 @@ public class PostgreSqlCacheOptions : IOptions
///
public string ConnectionString { get; set; }
+ ///
+ /// Configuration key for the connection string. Used for reloading from configuration sources like Azure Key Vault.
+ /// If set, the connection string will be reloaded from configuration when needed.
+ ///
+ public string ConnectionStringKey { get; set; }
+
+ ///
+ /// Configuration instance for reloading connection strings. Required when using .
+ ///
+ public IConfiguration Configuration { get; set; }
+
+ ///
+ /// Logger instance for connection string reloading operations.
+ ///
+ public ILogger Logger { get; set; }
+
+ ///
+ /// Time interval to check for connection string updates. Default is 5 minutes.
+ /// Only used when is set.
+ ///
+ public TimeSpan ConnectionStringReloadInterval { get; set; } = TimeSpan.FromMinutes(5);
+
+ ///
+ /// Whether to enable automatic connection string reloading from configuration.
+ /// Default is false.
+ ///
+ public bool EnableConnectionStringReloading { get; set; } = false;
+
///
/// An abstraction to represent the clock of a machine in order to enable unit testing.
///
diff --git a/Extensions.Caching.PostgreSql/PostgreSqlCacheServiceCollectionExtensions.cs b/Extensions.Caching.PostgreSql/PostgreSqlCacheServiceCollectionExtensions.cs
index db6f2de..5b651c4 100644
--- a/Extensions.Caching.PostgreSql/PostgreSqlCacheServiceCollectionExtensions.cs
+++ b/Extensions.Caching.PostgreSql/PostgreSqlCacheServiceCollectionExtensions.cs
@@ -5,6 +5,8 @@
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Caching.Distributed;
using Microsoft.Extensions.Options;
+using Microsoft.Extensions.Configuration;
+using Microsoft.Extensions.Logging;
namespace Community.Microsoft.Extensions.Caching.PostgreSql
{
@@ -28,10 +30,10 @@ public static IServiceCollection AddDistributedPostgreSqlCache(this IServiceColl
services.AddOptions();
AddPostgreSqlCacheServices(services);
-
+
return services;
}
-
+
///
/// Adds Community Microsoft PostgreSql distributed caching services to the specified .
///
@@ -79,14 +81,99 @@ public static IServiceCollection AddDistributedPostgreSqlCache(this IServiceColl
AddPostgreSqlCacheServices(services);
services.AddSingleton>(
sp => new ConfigureOptions(opt => setupAction(sp, opt)));
-
+
+ return services;
+ }
+
+ ///
+ /// Adds PostgreSQL distributed caching services with reloadable connection string support for Azure Key Vault rotation.
+ ///
+ /// The to add services to.
+ /// The configuration key for the connection string.
+ /// An to configure additional options.
+ /// The so that additional calls can be chained.
+ public static IServiceCollection AddDistributedPostgreSqlCacheWithReloadableConnection(
+ this IServiceCollection services,
+ string connectionStringKey,
+ Action setupAction = null)
+ {
+ if (services == null)
+ {
+ throw new ArgumentNullException(nameof(services));
+ }
+
+ if (string.IsNullOrEmpty(connectionStringKey))
+ {
+ throw new ArgumentException("Connection string key cannot be null or empty.", nameof(connectionStringKey));
+ }
+
+ services.AddOptions();
+ AddPostgreSqlCacheServices(services);
+ services.AddSingleton>(
+ sp => new ConfigureOptions(options =>
+ {
+ var configuration = sp.GetRequiredService();
+ var logger = sp.GetRequiredService>();
+
+ options.ConnectionStringKey = connectionStringKey;
+ options.Configuration = configuration;
+ options.Logger = logger;
+ options.EnableConnectionStringReloading = true;
+
+ setupAction?.Invoke(options);
+ }));
+
+ return services;
+ }
+
+ ///
+ /// Adds PostgreSQL distributed caching services with reloadable connection string support for Azure Key Vault rotation.
+ ///
+ /// The to add services to.
+ /// The configuration key for the connection string.
+ /// The interval to check for connection string updates.
+ /// An to configure additional options.
+ /// The so that additional calls can be chained.
+ public static IServiceCollection AddDistributedPostgreSqlCacheWithReloadableConnection(
+ this IServiceCollection services,
+ string connectionStringKey,
+ TimeSpan reloadInterval,
+ Action setupAction = null)
+ {
+ if (services == null)
+ {
+ throw new ArgumentNullException(nameof(services));
+ }
+
+ if (string.IsNullOrEmpty(connectionStringKey))
+ {
+ throw new ArgumentException("Connection string key cannot be null or empty.", nameof(connectionStringKey));
+ }
+
+ services.AddOptions();
+ AddPostgreSqlCacheServices(services);
+ services.AddSingleton>(
+ sp => new ConfigureOptions(options =>
+ {
+ var configuration = sp.GetRequiredService();
+ var logger = sp.GetRequiredService>();
+
+ options.ConnectionStringKey = connectionStringKey;
+ options.Configuration = configuration;
+ options.Logger = logger;
+ options.EnableConnectionStringReloading = true;
+ options.ConnectionStringReloadInterval = reloadInterval;
+
+ setupAction?.Invoke(options);
+ }));
+
return services;
}
// to enable unit testing
- private static void AddPostgreSqlCacheServices(IServiceCollection services)
+ private static void AddPostgreSqlCacheServices(IServiceCollection services)
{
- services.AddSingleton();
+ services.AddSingleton();
services.AddSingleton();
services.AddSingleton();
}
diff --git a/Extensions.Caching.PostgreSql/ReloadableConnectionStringProvider.cs b/Extensions.Caching.PostgreSql/ReloadableConnectionStringProvider.cs
new file mode 100644
index 0000000..aacb574
--- /dev/null
+++ b/Extensions.Caching.PostgreSql/ReloadableConnectionStringProvider.cs
@@ -0,0 +1,127 @@
+using System;
+using System.Threading;
+using System.Threading.Tasks;
+using Microsoft.Extensions.Configuration;
+using Microsoft.Extensions.Logging;
+using Microsoft.Extensions.Options;
+using Npgsql;
+
+namespace Community.Microsoft.Extensions.Caching.PostgreSql
+{
+ ///
+ /// Provides reloadable connection strings from configuration sources like Azure Key Vault.
+ /// This enables automatic connection string updates when secrets are rotated in Azure Key Vault.
+ ///
+ internal class ReloadableConnectionStringProvider : IDisposable
+ {
+ private readonly IConfiguration _configuration;
+ private readonly ILogger _logger;
+ private readonly string _connectionStringKey;
+ private readonly TimeSpan _reloadInterval;
+ private readonly Timer _reloadTimer;
+ private string _currentConnectionString;
+ private DateTime _lastReloadTime;
+ private readonly object _lockObject = new object();
+
+ public ReloadableConnectionStringProvider(
+ IConfiguration configuration,
+ ILogger logger,
+ string connectionStringKey,
+ TimeSpan reloadInterval)
+ {
+ _configuration = configuration ?? throw new ArgumentNullException(nameof(configuration));
+ _logger = logger ?? throw new ArgumentNullException(nameof(logger));
+ _connectionStringKey = connectionStringKey ?? throw new ArgumentNullException(nameof(connectionStringKey));
+ _reloadInterval = reloadInterval;
+
+ // Initial load
+ _currentConnectionString = LoadConnectionString();
+ _lastReloadTime = DateTime.UtcNow;
+
+ // Start timer for periodic reloading
+ _reloadTimer = new Timer(OnReloadTimer, null, _reloadInterval, _reloadInterval);
+ }
+
+ ///
+ /// Gets the current connection string, reloading it if necessary.
+ ///
+ public string GetConnectionString()
+ {
+ lock (_lockObject)
+ {
+ // Check if it's time to reload
+ if (DateTime.UtcNow - _lastReloadTime >= _reloadInterval)
+ {
+ var newConnectionString = LoadConnectionString();
+ if (newConnectionString != _currentConnectionString)
+ {
+ _logger.LogInformation("Connection string updated from configuration key: {Key}", _connectionStringKey);
+ _currentConnectionString = newConnectionString;
+ }
+ _lastReloadTime = DateTime.UtcNow;
+ }
+
+ return _currentConnectionString;
+ }
+ }
+
+ ///
+ /// Forces a reload of the connection string from configuration.
+ ///
+ public async Task ReloadConnectionStringAsync()
+ {
+ return await Task.Run(() =>
+ {
+ lock (_lockObject)
+ {
+ var newConnectionString = LoadConnectionString();
+ if (newConnectionString != _currentConnectionString)
+ {
+ _logger.LogInformation("Connection string manually reloaded from configuration key: {Key}", _connectionStringKey);
+ _currentConnectionString = newConnectionString;
+ }
+ _lastReloadTime = DateTime.UtcNow;
+ return _currentConnectionString;
+ }
+ });
+ }
+
+ private string LoadConnectionString()
+ {
+ try
+ {
+ var connectionString = _configuration[_connectionStringKey];
+ if (string.IsNullOrEmpty(connectionString))
+ {
+ _logger.LogWarning("Connection string not found for key: {Key}", _connectionStringKey);
+ return _currentConnectionString ?? string.Empty;
+ }
+
+ return connectionString;
+ }
+ catch (Exception ex)
+ {
+ _logger.LogError(ex, "Error loading connection string from configuration key: {Key}", _connectionStringKey);
+ return _currentConnectionString ?? string.Empty;
+ }
+ }
+
+ private void OnReloadTimer(object state)
+ {
+ try
+ {
+ // This will trigger a reload check on the next GetConnectionString call
+ _logger.LogDebug("Connection string reload timer triggered for key: {Key}", _connectionStringKey);
+ }
+ catch (Exception ex)
+ {
+ _logger.LogError(ex, "Error in connection string reload timer for key: {Key}", _connectionStringKey);
+ }
+ }
+
+ public void Dispose()
+ {
+ _reloadTimer?.Dispose();
+ }
+ }
+}
\ No newline at end of file
diff --git a/PostgresSqlCacheSolution.sln b/PostgresSqlCacheSolution.sln
index 9916e3a..9d4a86e 100644
--- a/PostgresSqlCacheSolution.sln
+++ b/PostgresSqlCacheSolution.sln
@@ -20,6 +20,8 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CachingTest", "CachingTest\CachingTest.csproj", "{C28627FD-B9F3-42D4-ABC4-345F932837BD}"
EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Benchmarks", "Benchmarks\Benchmarks.csproj", "{8E8B8E33-DF07-4C42-8789-2F17EA95B7A8}"
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
@@ -78,6 +80,18 @@ Global
{C28627FD-B9F3-42D4-ABC4-345F932837BD}.Release|x64.Build.0 = Release|Any CPU
{C28627FD-B9F3-42D4-ABC4-345F932837BD}.Release|x86.ActiveCfg = Release|Any CPU
{C28627FD-B9F3-42D4-ABC4-345F932837BD}.Release|x86.Build.0 = Release|Any CPU
+ {8E8B8E33-DF07-4C42-8789-2F17EA95B7A8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {8E8B8E33-DF07-4C42-8789-2F17EA95B7A8}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {8E8B8E33-DF07-4C42-8789-2F17EA95B7A8}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {8E8B8E33-DF07-4C42-8789-2F17EA95B7A8}.Debug|x64.Build.0 = Debug|Any CPU
+ {8E8B8E33-DF07-4C42-8789-2F17EA95B7A8}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {8E8B8E33-DF07-4C42-8789-2F17EA95B7A8}.Debug|x86.Build.0 = Debug|Any CPU
+ {8E8B8E33-DF07-4C42-8789-2F17EA95B7A8}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {8E8B8E33-DF07-4C42-8789-2F17EA95B7A8}.Release|Any CPU.Build.0 = Release|Any CPU
+ {8E8B8E33-DF07-4C42-8789-2F17EA95B7A8}.Release|x64.ActiveCfg = Release|Any CPU
+ {8E8B8E33-DF07-4C42-8789-2F17EA95B7A8}.Release|x64.Build.0 = Release|Any CPU
+ {8E8B8E33-DF07-4C42-8789-2F17EA95B7A8}.Release|x86.ActiveCfg = Release|Any CPU
+ {8E8B8E33-DF07-4C42-8789-2F17EA95B7A8}.Release|x86.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
diff --git a/README-Benchmarks.md b/README-Benchmarks.md
new file mode 100644
index 0000000..c9ebb2a
--- /dev/null
+++ b/README-Benchmarks.md
@@ -0,0 +1,261 @@
+# π Performance Benchmarks with Historical Tracking
+
+This repository includes comprehensive performance benchmarking with **historical trend analysis** and **regression detection** - exactly what you need to track performance evolution over time rather than just individual snapshots.
+
+## π Quick Start
+
+### 1. Initial Setup (One-time)
+
+```bash
+# 1. Run the setup workflow in GitHub Actions
+Go to: Actions β "Setup Benchmark Dashboard" β Run workflow
+
+# 2. Enable GitHub Pages
+Settings β Pages β Deploy from branch "gh-pages"
+
+# 3. Wait 5-10 minutes for deployment
+```
+
+### 2. View Your Dashboard
+
+Your live performance dashboard will be at:
+**`https://[username].github.io/[repository]/benchmarks/`**
+
+### 3. Start Tracking Performance
+
+```bash
+# Automatically runs Monday & Thursday
+# Or trigger manually: Actions β "Scheduled Performance Benchmarks"
+
+# For PRs, add 'performance' label or '[perf]' in title
+```
+
+## π― What You Get: Historical Analysis Instead of Snapshots
+
+### ❌ Before: Individual Results Only
+
+- Single point-in-time measurements
+- No trend analysis
+- Manual regression detection
+- No baseline comparison
+
+### ✅ After: Complete Historical Tracking
+
+- **π Time series charts** showing performance evolution
+- **β‘ Automatic regression detection** (alerts at 20-50% degradation)
+- **π― Baseline comparisons** for every PR
+- **π Commit correlation** to identify performance-impacting changes
+- **π Multi-metric analysis** (time, memory, percentiles)
+
+## π Interactive Dashboard Features
+
+### Historical Trend Charts
+
+- **Performance over time** with commit correlation
+- **Multiple metrics** visualization (execution time, memory allocation)
+- **Zoom and pan** functionality for detailed analysis
+- **Release markers** showing version-to-version changes
+
+### Regression Detection System
+
+- **Automatic alerts** when performance degrades
+- **Configurable thresholds**: 20% for PRs, 50% for scheduled runs
+- **Commit-level correlation** for root cause analysis
+- **Visual indicators** highlighting problem areas
+
+### Comparative Analysis
+
+- **PR vs Main branch** baseline comparisons
+- **Release-to-release** performance tracking
+- **Cross-benchmark** correlation analysis
+- **Long-term trend** identification
+
+## π Workflow Integration
+
+### 1. Scheduled Monitoring
+
+- **When**: Monday & Thursday at 2 AM UTC + code changes
+- **Purpose**: Track performance trends over time
+- **Result**: Historical database updates + dashboard refresh
+
+### 2. PR Performance Validation
+
+- **When**: PRs with `performance` label or `[perf]` in title
+- **Purpose**: Catch regressions before merge
+- **Result**: PR comment with baseline comparison + regression alerts
+
+### 3. Release Performance Validation
+
+- **When**: New releases published
+- **Purpose**: Comprehensive validation with full suite
+- **Result**: Release notes update + performance review issue
+
+## π Example Dashboard Views
+
+### Core Operations Trends
+
+```
+Performance (ms) over Time
+ β
+100 | β
+ 75 | β β
+ 50 | β ββββ Recent improvement
+ 25 | β
+ βββββββββββββββ
+ Commits over time
+```
+
+### Regression Detection
+
+```
+PR #123: [perf] Optimize caching
+🔴 Performance Alert: 25% degradation detected
+📊 Baseline: 45ms → Current: 56ms
+📈 View trend: [Dashboard Link]
+```
+
+## π οΈ Technical Implementation
+
+### Tools Used
+
+- **[github-action-benchmark](https://github.com/benchmark-action/github-action-benchmark)**: Historical data storage & visualization
+- **GitHub Pages**: Free dashboard hosting
+- **BenchmarkDotNet**: .NET performance measurement
+- **Chart.js**: Interactive charting
+
+### Data Storage
+
+- **Location**: `gh-pages` branch
+- **Format**: JSON time series data
+- **Retention**:
+ - Scheduled runs: 30 days artifacts + permanent dashboard data
+ - PR runs: 14 days artifacts
+ - Release runs: 1 year artifacts + permanent dashboard data
+
+### Alert Thresholds
+
+- **PR validation**: 20% degradation (sensitive)
+- **Scheduled monitoring**: 50% degradation (major issues)
+- **Customizable** via workflow configuration
+
+## π Benchmark Types
+
+| Benchmark | Measures | Runtime | Dashboard Chart |
+| ------------- | ----------------------------------------- | ---------- | -------------------- |
+| `core` | Basic operations (Get/Set/Delete/Refresh) | ~5-10 min | Real-time trends |
+| `datasize` | 1KB to 1MB payload performance | ~10-15 min | Size impact analysis |
+| `expiration` | Different expiration strategies | ~10-15 min | Strategy comparison |
+| `concurrency` | 2-16 concurrent operations | ~15-20 min | Scalability trends |
+| `bulk` | Batch operations (10-500 items) | ~15-25 min | Throughput analysis |
+
+## π― Usage Examples
+
+### For Performance-Sensitive PRs
+
+```bash
+# 1. Create PR with performance testing
+git checkout -b feature/optimize-caching
+# ... make changes ...
+git commit -m "[perf] Optimize connection pooling"
+git push origin feature/optimize-caching
+
+# 2. GitHub automatically:
+# - Runs core benchmark
+# - Compares vs main branch
+# - Posts results with historical context
+# - Alerts if regression detected
+```
+
+### For Release Performance Validation
+
+```bash
+# When you create a release, GitHub automatically:
+# - Runs full benchmark suite (all 5 types)
+# - Updates dashboard with release data
+# - Adds performance summary to release notes
+# - Creates performance review issue
+```
+
+### For Regular Monitoring
+
+```bash
+# Automatic scheduled runs every Monday & Thursday
+# Dashboard continuously updated with trends
+# Regression alerts on significant changes
+```
+
+## π Interpreting Results
+
+### Green Trends 🟢
+
+- Stable or improving performance
+- Low variability between runs
+- Memory allocations stable
+
+### Yellow Warnings 🟡
+
+- Minor performance changes
+- Increased variability
+- Worth monitoring
+
+### Red Alerts 🔴
+
+- Significant regressions detected
+- High memory growth
+- Requires investigation
+
+## π Documentation
+
+- **[Complete Guide](docs/PerformanceTesting.md)**: Detailed setup and usage
+- **[Benchmark README](Benchmarks/README.md)**: Technical implementation details
+- **[Workflow Files](.github/workflows/)**: GitHub Actions configuration
+
+## π€ Contributing
+
+### Adding Performance Testing to PRs
+
+```bash
+# Option 1: Add label
+Add 'performance' label to your PR
+
+# Option 2: Include in title
+Title: "[perf] Your change description"
+```
+
+### Investigating Regressions
+
+1. **Check dashboard**: Click regression markers for details
+2. **Review commit**: Examine code changes in flagged commit
+3. **Test locally**: Reproduce with `dotnet run --configuration Release`
+4. **Compare baselines**: Use dashboard historical data
+
+## β FAQ
+
+**Q: Why not run benchmarks on every PR?**
+A: Performance tests are resource-intensive (5-90 minutes). Our opt-in approach prevents CI bottlenecks while providing comprehensive testing when needed.
+
+**Q: How accurate are GitHub Actions runner results?**
+A: Individual results have some variance due to shared infrastructure. The value is in **relative trends and regression detection** rather than absolute performance numbers.
+
+**Q: Can I customize alert thresholds?**
+A: Yes! Edit the `alert-threshold` values in the workflow files. More sensitive for critical performance paths, less sensitive for secondary features.
+
+**Q: How do I view historical data offline?**
+A: Clone the `gh-pages` branch - all data is stored as JSON files you can analyze locally.
+
+---
+
+## π Result: Complete Historical Performance Tracking
+
+Instead of individual benchmark snapshots, you now have:
+
+✅ **Continuous performance timeline** showing evolution over time
+✅ **Automatic regression detection** with commit-level correlation
+✅ **Interactive dashboard** for trend analysis and investigation
+✅ **Baseline comparisons** for every performance-sensitive change
+✅ **Zero-cost hosting** using GitHub Pages
+✅ **Seamless CI/CD integration** with existing workflows
+
+**Your dashboard**: `https://leonibr.github.io/community-extensions-cache-postgres/benchmarks/`
+
+The horizontal, historical view you requested is now built into every benchmark run! π
diff --git a/README.md b/README.md
index 7a13e8e..0a0564f 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,7 @@
ο»Ώ# PostgreSQL Distributed Cache for .NET Core | Community Edition
[](https://www.nuget.org/packages/Community.Microsoft.Extensions.Caching.PostgreSql)
+[](https://leonibr.github.io/community-extensions-cache-postgres/coverage/)
## Introduction
@@ -12,7 +13,7 @@ This library allows you to seamlessly integrate caching into your ASP.NET / .NET
1. If you already use PostgreSQL, this package avoids the need for additional caching solutions like Redis, reducing infrastructure overhead.
1. Optimized for fast read and write operations with PostgreSQL, providing excellent caching performance. It is not a competitor to Redis, but it is a good alternative for some scenarios.
-1. Dstributed cache supports scaling of multiple instances and high loads.
+1. Distributed cache supports scaling of multiple instances and high loads.
1. Simple setup process using standard ASP.NET Core / .NET Core dependency injection.
1. Provides flexible configuration options including cache expiration policies, background cleanup tasks, read-only mode, and more.
1. Benefit from the power of open source and a community-driven approach to caching.
@@ -20,27 +21,28 @@ This library allows you to seamlessly integrate caching into your ASP.NET / .NET
## Table of Contents
1. [Getting Started](#getting-started)
-2. [Installation](#installation)
-3. [Basic Configuration](#basic-configuration)
-4. [Configuration Options](#configuration-options)
+ - [Installation](#installation)
+ - [Basic Configuration](#basic-configuration)
+1. [Configuration Options](#configuration-options)
- [Disable Remove Expired](#disable-remove-expired-true-use-case-default-false)
- [Update on Get Cache Item](#updateongetcacheitem--false-use-case-default-true)
- [Read Only Mode](#readonlymode--true-use-case-default-false)
- [Create Infrastructure](#createinfrastructure--true-use-case)
-5. [Usage Examples](#usage-examples)
+1. [Usage Examples](#usage-examples)
- [Basic Example](#basic-example)
- [Using Custom Options](#using-custom-options)
-6. [Running the Console Sample](#runing-the-console-sample)
-7. [Running the React+WebApi Web Sample](#runing-the-reactwebapi-websample-project)
-8. [Change Log](#change-log)
-9. [Contributing](#contributing)
-10. [License](#license)
-11. [FAQ](#faq)
-12. [Troubleshooting](#troubleshooting)
+1. [Code Coverage](#code-coverage)
+1. [Running the Console Sample](#running-the-console-sample)
+1. [Running the React+WebApi Web Sample](#running-the-reactwebapi-websample-project)
+1. [Change Log](#change-log)
+1. [Contributing](#contributing)
+1. [License](#license)
+1. [FAQ](#faq)
+1. [Troubleshooting](#troubleshooting)
## Getting Started
-### 1. Installation
+### Installation
Install the package via the .NET CLI:
@@ -48,7 +50,7 @@ Install the package via the .NET CLI:
dotnet add package Community.Microsoft.Extensions.Caching.PostgreSql
```
-### 2. Basic Configuration
+### Basic Configuration
Add the following line to your `Startup.cs` or `Program.cs`'s `ConfigureServices` method:
@@ -99,51 +101,22 @@ IConfigureOptions
## Configuration Options
-### `DisableRemoveExpired = True` use case (default false):
+The following options can be set when configuring the PostgreSQL distributed cache. Each option is described with its purpose, recommended use cases, and any pros/cons to help you decide the best configuration for your scenario.
-When you have 2 or more instances/microservices/processes and you want to leave only one instance to remove expired items.
+**For detailed explanations, usage guidance, and pros/cons for each option, see the [Options Details & Usage Guidance](docs/OptionsDetails.md) document.**
-- **Note 1:** This is not mandatory; assess whether it fits your needs.
-- **Note 2:** If you have only one instance and set this to `True`, expired items will not be automatically removed. When calling `GetItem`, expired items are filtered out. In this scenario, you are responsible for manually removing the expired keys or updating them.
+| Option | Type | Default | Description |
+| --------------------------------------------------------------------------------------- | -------- | -------- | ---------------------------------------------------------------- |
+| `ConnectionString`                                                                       | string   | —        | The PostgreSQL connection string. **Required.**                  |
+| `SchemaName` | string | "public" | The schema where the cache table will be created. |
+| `TableName` | string | "cache" | The name of the cache table. |
+| [`DisableRemoveExpired`](docs/OptionsDetails.md#1-disableremoveexpired) | bool | false | Disables automatic removal of expired cache items. |
+| [`UpdateOnGetCacheItem`](docs/OptionsDetails.md#2-updateongetcacheitem) | bool | true | Updates sliding expiration on cache reads. |
+| [`ReadOnlyMode`](docs/OptionsDetails.md#3-readonlymode) | bool | false | Enables read-only mode (no writes, disables sliding expiration). |
+| [`CreateInfrastructure`](docs/OptionsDetails.md#4-createinfrastructure) | bool | true | Automatically creates the schema/table if they do not exist. |
+| [`ExpiredItemsDeletionInterval`](docs/OptionsDetails.md#5-expireditemsdeletioninterval) | TimeSpan | 30 min | How often expired items are deleted (min: 5 min). |
-### `UpdateOnGetCacheItem = false` use case (default true):
-
-If you (or the implementation using this cache) are explicitly calling `IDistributedCache.Refresh` to update the sliding window, you can turn off `UpdateOnGetCacheItem` to remove the extra DB expiration update call prior to reading the cached value. This is useful when used with ASP.NET Core Session handling.
-
-```csharp
-services.AddDistributedPostgreSqlCache((serviceProvider, setup) =>
-{
- ...
- setup.UpdateOnGetCacheItem = false;
- // Or
- var configuration = serviceProvider.GetRequiredService();
- setup.UpdateOnGetCacheItem = configuration["UpdateOnGetCacheItem"];
- ...
-});
-```
-
-### `ReadOnlyMode = true` use case (default false):
-
-For read-only databases, or if the database user lacks `write` permissions, you can set `ReadOnlyMode = true`.
-
-- **Note 1:** This will disable sliding expiration; only absolute expiration will work.
-- **Note 2:** This can improve performance, but you will not be able to change any cache values.
-
-```csharp
-services.AddDistributedPostgreSqlCache((serviceProvider, setup) =>
-{
- ...
- setup.ReadOnlyMode = true;
- // Or
- var configuration = serviceProvider.GetRequiredService();
- setup.ReadOnlyMode = configuration["UpdateOnGetCacheItem"];
- ...
-});
-```
-
-### `CreateInfrastructure = true` use case:
-
-This creates the table and schema for storing the cache (names are configurable) if they don't exist.
+---
## Usage Examples
@@ -204,6 +177,73 @@ This creates the table and schema for storing the cache (names are configurable)
});
```
+### Azure Key Vault Rotation Support
+
+For applications using Azure Key Vault for secret management, this library provides built-in support for automatic connection string reloading when secrets are rotated.
+
+#### Quick Setup
+
+```csharp
+// Install required packages
+// dotnet add package Azure.Security.KeyVault.Secrets
+// dotnet add package Azure.Identity
+// dotnet add package Microsoft.Extensions.Configuration.AzureKeyVault
+
+// Configure Azure Key Vault
+var keyVaultUrl = $"https://{builder.Configuration["AzureKeyVault:VaultName"]}.vault.azure.net/";
+var credential = new ClientSecretCredential(
+ builder.Configuration["AzureKeyVault:TenantId"],
+ builder.Configuration["AzureKeyVault:ClientId"],
+ builder.Configuration["AzureKeyVault:ClientSecret"]);
+
+var secretClient = new SecretClient(new Uri(keyVaultUrl), credential);
+builder.Configuration.AddAzureKeyVault(secretClient, new AzureKeyVaultConfigurationOptions());
+
+// Configure cache with reloadable connection string
+builder.Services.AddDistributedPostgreSqlCacheWithReloadableConnection(
+ connectionStringKey: "PostgreSqlCache:ConnectionString",
+ reloadInterval: TimeSpan.FromMinutes(5),
+ setupAction: options =>
+ {
+ options.SchemaName = "cache";
+ options.TableName = "cache_items";
+ options.CreateInfrastructure = true;
+ });
+```
+
+#### Features
+
+- **Automatic Reloading**: Periodically checks for updated connection strings
+- **Configurable Intervals**: Set how often to check for updates (default: 5 minutes)
+- **Thread-Safe**: Safe concurrent access to connection string updates
+- **Comprehensive Logging**: Detailed logging of connection string changes
+- **Graceful Fallback**: Continues using existing connection string if reload fails
+
+For detailed implementation guide, see [Azure Key Vault Rotation Support](AZURE_KEY_VAULT_ROTATION.md).
+
+## Code Coverage
+
+This project maintains comprehensive test coverage to ensure reliability and quality. You can view the current coverage status and detailed reports in several ways:
+
+### Coverage Badge
+
+The coverage badge in the header shows the current test coverage percentage. Click on it to view the detailed HTML coverage report.
+
+### Coverage Reports
+
+- **HTML Report**: Available at [https://leonibr.github.io/community-extensions-cache-postgres/coverage/](https://leonibr.github.io/community-extensions-cache-postgres/coverage/)
+- **GitHub Actions**: Coverage reports are generated automatically on every push to the main branch
+- **Local Generation**: Run `dotnet test --collect:"XPlat Code Coverage"` to generate coverage reports locally
+
+### Coverage Details
+
+The coverage report includes:
+
+- Line coverage for all source files
+- Branch coverage analysis
+- Detailed breakdown by class and method
+- Historical coverage trends
+
## Running the Console Sample
You will need a local PostgreSQL server with the following:
@@ -254,21 +294,20 @@ prepare-database.cmd -erase // windows
## Change Log
-1. v5.0.0 - Added support for .NET 9
- 1. [BREAKING CHANGE] - Dropped support for .NETStandard2.0
- 1. [BREAKING CHANGE] - Supports .NET 9, .NET 8 and .NET 6
-1. v4.0.1 - Added support for .NET 7
- 1. [BREAKING CHANGE] - Dropped support for .NET 5
- 2. [BREAKING CHANGE] - Now uses stored procedures (won't work with PostgreSQL <= 10, use version 3)
-1. v3.1.2 - Removed dependency for `IHostApplicationLifetime` if not supported on the platform (e.g., AWS) - issue #28
-1. v3.1.0 - Added log messages on `Debug` Level, multitarget .NET 5 and .NET 6, dropped support for netstandard2.0, fixed sample to match multi-targeting and sample database.
-1. v3.0.2 - `CreateInfrastructure` also creates the schema - issue #8
-1. v3.0.1 - Added `DisableRemoveExpired` configuration; if `TRUE`, the cache instance won't delete expired items.
-1. v3.0
- 1. [BREAKING CHANGE] - Direct instantiation not preferred.
- 2. Single-threaded loop remover.
-1. v2.0.x - Updated everything to .NET 5.0, more detailed sample project.
-1. v1.0.8 - Updated to the latest dependencies.
+- [v5.0.1](https://github.com/leonibr/community-extensions-cache-postgres/releases/tag/5.0.1) - Added unit tests and improve multitarget frameworks
+- [v5.0.0](https://github.com/leonibr/community-extensions-cache-postgres/releases/tag/5.0.0) - Added support for .NET 9
+ - [BREAKING CHANGE] - Dropped support for .NETStandard2.0
+ - [BREAKING CHANGE] - Supports .NET 9, .NET 8 and .NET 6
+- [v4.0.1](https://github.com/leonibr/community-extensions-cache-postgres/releases/tag/4.0.1) - Added support for .NET 7
+ - [BREAKING CHANGE] - Dropped support for .NET 5
+ - [BREAKING CHANGE] - Now uses stored procedures (won't work with PostgreSQL <= 10, use version 3)
+- [v3.1.2](https://github.com/leonibr/community-extensions-cache-postgres/releases/tag/v3.1.2) - Removed dependency for `IHostApplicationLifetime` if not supported on the platform (e.g., AWS) - issue #28
+- [v3.1.0](https://github.com/leonibr/community-extensions-cache-postgres/releases/tag/3.1.0) - Added log messages on `Debug` Level, multitarget .NET 5 and .NET 6, dropped support for netstandard2.0, fixed sample to match multi-targeting and sample database.
+- [v3.0.2](https://github.com/leonibr/community-extensions-cache-postgres/releases/tag/v3.0.2) - `CreateInfrastructure` also creates the schema - issue #8
+- [v3.0.1](https://github.com/leonibr/community-extensions-cache-postgres/releases/tag/v3.0.1) - Added `DisableRemoveExpired` configuration; if `TRUE`, the cache instance won't delete expired items.
+- [v3.0](https://github.com/leonibr/community-extensions-cache-postgres/releases/tag/3.0.0) - [BREAKING CHANGE] - Direct instantiation not preferred. Single-threaded loop remover.
+- [v2.0.x commits](https://github.com/leonibr/community-extensions-cache-postgres/commits/main?utf8=%E2%9C%93&search=v2.0) - Updated everything to .NET 5.0, more detailed sample project.
+- [v1.0.8](https://github.com/leonibr/community-extensions-cache-postgres/releases/tag/v1.0.8) - Updated to the latest dependencies.
## Contributing
@@ -304,7 +343,3 @@ Please check the [Github issues page](https://github.com/leonibr/community-exten
### Known issues:
- The library does not perform well with large objects in the cache due to the nature of PostgreSQL, large objects may cause performance bottlenecks.
-
----
-
-### This is a fork from [repo](https://github.com/wullemsb/Extensions.Caching.PostgreSQL)
diff --git a/docs/OptionsDetails.md b/docs/OptionsDetails.md
new file mode 100644
index 0000000..76bf838
--- /dev/null
+++ b/docs/OptionsDetails.md
@@ -0,0 +1,102 @@
+# Options Details & Usage Guidance
+
+[← Back to Configuration Options in README](../README.md#configuration-options)
+
+## `ConnectionString`, `SchemaName`, `TableName`
+
+- **What they do:**
+ Standard DB connection and naming options.
+- **When to use:**
+ - Always set `ConnectionString`.
+ - Customize `SchemaName`/`TableName` if you want to use non-default names or schemas.
+
+## 1. `DisableRemoveExpired`
+
+- **What it does:**
+ If `true`, this instance will not automatically remove expired cache items in the background.
+- **When to use:**
+ - You have multiple app instances and want only one to perform cleanup (set `true` on all but one).
+ - You want to handle expired item cleanup yourself.
+- **Pros:**
+ - Reduces DB load if you have many instances.
+- **Cons:**
+ - If all instances have this set to `true`, expired items will accumulate unless you remove them manually.
+- **Recommendation:**
+ - For single-instance deployments, leave as `false`.
+ - For multi-instance, set `true` on all but one instance.
+
+## 2. `UpdateOnGetCacheItem`
+
+- **What it does:**
+ If `true`, reading a cache item with sliding expiration will update its expiration in the database.
+- **When to use:**
+ - Leave as `true` for most scenarios.
+ - Set to `false` if you explicitly call `IDistributedCache.Refresh` (e.g., with ASP.NET Core Session).
+- **Pros:**
+ - Ensures sliding expiration works automatically.
+- **Cons:**
+ - Slightly more DB writes on cache reads.
+- **Recommendation:**
+ - Set to `false` only if you manage sliding expiration yourself.
+
+## 3. `ReadOnlyMode`
+
+- **What it does:**
+ If `true`, disables all write operations (including sliding expiration updates).
+- **When to use:**
+ - Your database user has only read permissions.
+ - You want to ensure the cache is never modified by this app.
+- **Pros:**
+ - Prevents accidental writes.
+ - Can improve performance (no write queries).
+- **Cons:**
+ - Sliding expiration is disabled; only absolute expiration works.
+ - Cache cannot be updated or cleared by this instance.
+- **Recommendation:**
+ - Use for read-only replicas or when you want strict read-only cache access.
+
+## 4. `CreateInfrastructure`
+
+- **What it does:**
+ If `true`, creates the schema and table for the cache if they do not exist.
+- **When to use:**
+ - You want the app to auto-provision the cache table/schema.
+- **Pros:**
+ - Simplifies setup.
+- **Cons:**
+ - May not be desirable in environments with strict DB change controls.
+- **Recommendation:**
+ - Set to `false` if you want to manage DB schema manually.
+
+## 5. `ExpiredItemsDeletionInterval`
+
+- **What it does:**
+ Sets how often the background process checks for and deletes expired items.
+- **When to use:**
+ - Adjust for your cache churn and DB performance needs.
+- **Pros:**
+ - Lower intervals mean expired items are removed sooner.
+- **Cons:**
+ - Too frequent can increase DB load; too infrequent can leave expired data longer.
+- **Recommendation:**
+ - Default (30 min) is suitable for most; minimum is 5 min.
+
+---
+
+## Custom Configuration
+
+```csharp
+services.AddDistributedPostgreSqlCache(options =>
+{
+ options.ConnectionString = configuration["CacheConnectionString"];
+ options.SchemaName = "my_schema";
+ options.TableName = "my_cache_table";
+ options.DisableRemoveExpired = true; // Only if another instance is cleaning up
+ options.CreateInfrastructure = false; // If you manage schema manually
+ options.ExpiredItemsDeletionInterval = TimeSpan.FromMinutes(15);
+ options.UpdateOnGetCacheItem = false; // If you call Refresh explicitly
+ options.ReadOnlyMode = false; // Set true for read-only DB users
+});
+```
+
+[← Back to Configuration Options in README](../README.md#configuration-options)
diff --git a/docs/PerformanceTesting.md b/docs/PerformanceTesting.md
new file mode 100644
index 0000000..cb49b4b
--- /dev/null
+++ b/docs/PerformanceTesting.md
@@ -0,0 +1,399 @@
+# Performance Testing in CI/CD
+
+This document explains how performance testing is integrated into our GitHub Actions CI/CD pipeline for the PostgreSQL distributed cache library.
+
+## Overview
+
+We use a **hybrid approach** with three complementary workflows plus **historical trend analysis**:
+
+1. **Scheduled Monitoring** - Regular performance tracking with historical storage
+2. **PR Validation** - Optional performance testing with regression detection
+3. **Release Validation** - Comprehensive testing before releases
+4. **Interactive Dashboard** - Historical trends and regression analysis
+
+## 📊 Performance Dashboard
+
+### Live Dashboard
+
+Once configured, your performance dashboard will be available at:
+**`https://leonibr.github.io/community-extensions-cache-postgres/benchmarks/`**
+
+### Features
+
+- **📈 Historical Trends**: Interactive charts showing performance over time
+- **⚡ Regression Detection**: Automatic alerts when performance degrades >20% (PRs) or >50% (scheduled)
+- **🔍 Drill-down Analysis**: Click data points to see specific commit details
+- **🎯 Multiple Metrics**: Time, memory allocation, and percentile tracking
+
+## Workflows
+
+### 1. Scheduled Performance Benchmarks
+
+**File:** `.github/workflows/benchmarks-scheduled.yml`
+
+**Triggers:**
+
+- **Schedule:** Monday and Thursday at 2 AM UTC
+- **Manual:** Via workflow dispatch in GitHub Actions UI
+- **Automatic:** When code changes in `Extensions.Caching.PostgreSql/` or `Benchmarks/`
+
+**What it does:**
+
+- Runs core operations benchmark by default
+- Can run specific benchmarks or full suite via manual trigger
+- **Stores results in historical database**
+- **Updates live dashboard automatically**
+- Creates GitHub job summaries with performance data
+- **Triggers alerts on >50% performance degradation**
+
+**Usage:**
+
+```bash
+# Runs automatically, or trigger manually from GitHub Actions UI
+# Select which benchmarks to run: core, datasize, expiration, concurrency, bulk, or all
+```
+
+### 2. PR Performance Validation
+
+**File:** `.github/workflows/benchmarks-pr.yml`
+
+**Triggers:**
+
+- When PR touches performance-sensitive code
+- Only runs when explicitly requested
+
+**How to trigger:**
+
+1. **Option 1:** Add `performance` label to your PR
+2. **Option 2:** Include `[perf]` in your PR title
+
+**What it does:**
+
+- Runs core operations benchmark (fastest)
+- **Compares results against main branch baseline**
+- **Triggers alerts on >20% performance degradation**
+- Posts results with historical context as PR comment
+- Provides guidance for additional testing
+- Stores detailed results for 14 days
+
+**Example PR titles:**
+
+- `[perf] Optimize connection pooling`
+- `Fix caching logic [perf]`
+
+### 3. Release Performance Validation
+
+**File:** `.github/workflows/benchmarks-release.yml`
+
+**Triggers:**
+
+- **Automatic:** When a release is published
+- **Manual:** Via workflow dispatch
+
+**What it does:**
+
+- Runs comprehensive benchmark suite (all 5 benchmark types)
+- **Updates historical dashboard with release data**
+- Generates detailed performance report
+- Updates release notes with performance summary
+- Creates performance review issue
+- Stores results for 1 year
+- Performs basic regression analysis
+
+## Understanding Results
+
+### Historical Trend Analysis
+
+The dashboard provides several views for performance analysis:
+
+**📈 Trend Charts:**
+
+- Performance over time with commit correlation
+- Multiple metric visualization (time, memory, percentiles)
+
+**🔍 Regression Detection:**
+
+- Automatic highlighting of performance degradations
+- Established thresholds (20% for PRs, 50% for scheduled runs)
+- Commit-level correlation for root cause analysis
+
+**📊 Comparative Analysis:**
+
+- Baseline comparisons for PR validation
+- Release-to-release performance tracking
+- Cross-benchmark correlation analysis
+
+### Benchmark Types
+
+| Benchmark | Purpose | Typical Runtime | Dashboard Chart |
+| ------------- | -------------------------------------------- | --------------- | -------------------- |
+| `core` | Basic operations (Get, Set, Delete, Refresh) | ~5-10 minutes | Real-time trends |
+| `datasize` | Performance with 1KB to 1MB payloads | ~10-15 minutes | Size impact analysis |
+| `expiration` | Different expiration strategies | ~10-15 minutes | Strategy comparison |
+| `concurrency` | 2-16 concurrent operations | ~15-20 minutes | Scalability trends |
+| `bulk` | Batch operations (10-500 items) | ~15-25 minutes | Throughput analysis |
+
+### Key Metrics
+
+- **Mean**: Average execution time per operation (primary trend metric)
+- **Error**: Standard error of measurements (confidence indicator)
+- **StdDev**: Standard deviation (consistency indicator)
+- **P90/P95**: 90th/95th percentile response times (latency analysis)
+- **Allocated**: Memory allocated per operation (memory trend tracking)
+- **Ratio**: Performance relative to baseline (regression detection)
+
+### Performance Thresholds
+
+🟢 **Good Performance:**
+
+- Core operations < 50ms average
+- Memory allocations stable or decreasing
+- Low standard deviation (< 10% of mean)
+- **Dashboard shows green trend lines**
+
+🟡 **Acceptable Performance:**
+
+- Core operations 50-100ms average
+- Minimal memory growth (< 5% per release)
+- Consistent across runs (StdDev < 20% of mean)
+- **Dashboard shows yellow warning indicators**
+
+🔴 **Performance Issues:**
+
+- Core operations > 100ms average
+- Significant memory increases (> 10% per release)
+- High variability between runs (StdDev > 30% of mean)
+- **Dashboard shows red alerts and regression markers**
+
+## Best Practices
+
+### For Contributors
+
+1. **Use labels wisely:** Only add `performance` label when you suspect performance impact
+2. **Test locally first:** Run `dotnet run --configuration Release` in the Benchmarks folder
+3. **Compare results:** Look at ratios and trends, not just absolute numbers
+4. **Monitor allocations:** Watch for memory allocation increases
+5. **Check dashboard:** Review historical context before and after changes
+6. **Investigate alerts:** Don't ignore regression warnings in PR comments
+
+## Dashboard Usage Guide
+
+### Viewing Trends
+
+1. **Access Dashboard:** Visit GitHub Pages URL
+2. **Select Benchmark:** Each benchmark type has its own chart section
+3. **Analyze Trends:**
+ - Hover over data points for detailed information
+ - Look for patterns and correlations with commits
+ - Identify regression points and improvements
+
+### Interpreting Charts
+
+**Time Series Analysis:**
+
+- X-axis: Commit chronology / timestamps
+- Y-axis: Performance metric (logarithmic scale)
+- Data points: Individual benchmark runs
+- Trend lines: Moving averages and regression analysis
+
+**Alert Indicators:**
+
+- 🔴 Red markers: Significant regressions detected
+- 🟡 Yellow markers: Minor performance changes
+- 🟢 Green markers: Performance improvements
+
+### Regression Investigation
+
+When the dashboard shows performance regressions:
+
+1. **Identify the commit:** Click the regression marker
+2. **Review changes:** Examine the code changes in that commit
+3. **Correlate impact:** Look at the magnitude of regression
+4. **Check consistency:** Verify the regression across multiple runs
+5. **Investigate locally:** Reproduce the issue in development environment
+
+## Local Development
+
+### Running Benchmarks Locally
+
+```bash
+cd Benchmarks
+
+# Run all benchmarks
+dotnet run --configuration Release
+
+# Run specific benchmark
+dotnet run --configuration Release -- core
+dotnet run --configuration Release -- datasize
+dotnet run --configuration Release -- expiration
+dotnet run --configuration Release -- concurrency
+dotnet run --configuration Release -- bulk
+```
+
+### Prerequisites
+
+- .NET 9.0 SDK
+- Docker (for PostgreSQL TestContainer)
+- At least 4GB RAM available
+- x64 platform (recommended)
+
+### Understanding Local vs CI Results
+
+**Local Environment:**
+
+- Your specific hardware
+- Fewer variables
+- More consistent runs
+- Better for development
+- **Use for detailed analysis**
+
+**CI Environment:**
+
+- GitHub Actions Ubuntu runner
+- Shared/virtualized resources
+- Some variability expected
+- Good for trend analysis
+- **Use for historical tracking**
+
+**Dashboard Integration:**
+
+- Shows both environments when available
+- Clearly labels data sources
+- Provides context for result interpretation
+
+## Troubleshooting
+
+### Dashboard Issues
+
+**Dashboard not loading:**
+
+```bash
+# Check GitHub Pages status
+# Verify gh-pages branch exists
+# Confirm GitHub Pages is enabled in repository settings
+```
+
+**No chart data showing:**
+
+```bash
+# Run the setup workflow first
+# Execute at least one benchmark
+# Wait 5-10 minutes for deployment
+# Check browser console for JavaScript errors
+```
+
+**Charts showing errors:**
+
+```bash
+# Verify JSON data files exist in gh-pages branch
+# Check that benchmark JSON output format is correct
+# Ensure github-action-benchmark step is running successfully
+```
+
+### Common Issues
+
+**Benchmark fails to start:**
+
+```bash
+# Check Docker is running
+docker ps
+
+# Verify .NET version
+dotnet --version
+
+# Ensure BenchmarkDotNet generates JSON output
+# Check file paths in workflow configuration
+```
+
+**High variability in results:**
+
+- Normal for CI environment
+- Focus on trends over absolute values
+- Consider multiple runs for critical changes
+- Use dashboard trend analysis for patterns
+
+**Regression alerts not firing:**
+
+- Check alert thresholds in workflow files
+- Verify baseline data exists for comparison
+- Ensure github-action-benchmark step is configured correctly
+
+### Getting Help
+
+1. **Check workflow logs:** GitHub Actions provides detailed execution logs
+2. **Review dashboard console:** Browser developer tools for frontend issues
+3. **Examine gh-pages branch:** Verify data structure and content
+4. **Test locally:** Try reproducing the issue in development environment
+5. **Create issue:** Include benchmark results, dashboard screenshots, and environment details
+
+## Performance History
+
+### Viewing Historical Data
+
+- **Live Dashboard:** Interactive charts with full history
+- **GitHub Pages:** Automatic updates with each benchmark run
+- **Artifacts:** Download from GitHub Actions runs for offline analysis
+- **Job Summaries:** View in GitHub Actions UI with dashboard links
+- **Release Notes:** Performance summaries with dashboard references
+- **Issues:** Performance review issues with historical context
+
+### Exporting Data
+
+```bash
+# Data is stored in JSON format in gh-pages branch
+# Each benchmark type has its own data file:
+# - core-benchmark.json
+# - datasize-benchmark.json
+# - expiration-benchmark.json
+# - concurrency-benchmark.json
+# - bulk-benchmark.json
+
+# Clone gh-pages branch for offline analysis
+git clone -b gh-pages https://github.com/leonibr/community-extensions-cache-postgres.git dashboard-data
+```
+
+### Comparing Performance Across Versions
+
+The dashboard automatically provides:
+
+- **Release markers:** Special indicators for tagged releases
+- **Commit correlation:** Direct links to code changes
+- **Trend analysis:** Moving averages and regression detection
+- **Baseline tracking:** Consistent baseline comparisons
+
+## Advanced Configuration
+
+### Customizing Alert Thresholds
+
+Edit the workflow files to adjust sensitivity:
+
+```yaml
+# In benchmarks-scheduled.yml
+alert-threshold: '150%' # Trigger at 50% degradation
+
+# In benchmarks-pr.yml
+alert-threshold: '120%' # Trigger at 20% degradation (more sensitive)
+```
+
+### Adding Custom Metrics
+
+To track additional metrics:
+
+1. Modify BenchmarkDotNet configuration in `Program.cs`
+2. Update dashboard JavaScript to handle new metrics
+3. Adjust chart configurations for optimal visualization
+
+### Dashboard Customization
+
+The dashboard HTML can be customized by:
+
+1. Modifying the setup workflow template
+2. Editing the generated `index.html` in gh-pages branch
+3. Adding custom CSS/JavaScript for enhanced visualizations
+4. Integrating with external monitoring tools
+
+---
+
+**Questions or suggestions?** Open an issue or discussion in the repository.
+
+**Need help with setup?** Run the "Setup Benchmark Dashboard" workflow for guided initialization.