Compare commits

...

25 Commits

Author SHA1 Message Date
Arunavo Ray
0d63fd4dae Added more docs 2025-10-31 09:22:55 +05:30
Arunavo Ray
109958342d updated docs 2025-10-31 09:17:28 +05:30
Arunavo Ray
491546a97c added basic nix pack 2025-10-31 09:00:18 +05:30
ARUNAVO RAY
7a3f734728 Merge pull request #142 from RayLabsHQ/fix/issue-141-duplicate-issues-on-sync
fix: add metadata field to repositories table to prevent duplicate issues on sync
2025-10-31 08:51:34 +05:30
Arunavo Ray
d59a07a8c5 fix: add metadata field to repositories table to prevent duplicate issues on sync
Fixes #141

The repository metadata field was missing from the database schema, which
caused the metadata sync state (issues, PRs, releases, etc.) to not persist.
This resulted in duplicate issues being created every time a repository was
synced because the system couldn't track what had already been mirrored.

Changes:
- Added metadata text field to repositories table in schema
- Added metadata field to repositorySchema Zod validation
- Generated database migration 0008_serious_thena.sql

Root cause analysis:
1. Code tried to read/write repository.metadata to track mirrored components
2. The metadata field didn't exist in the database schema
3. On sync, metadataState.components.issues was always false
4. This triggered re-mirroring of all issues, creating duplicates

The fix ensures metadata state persists between mirrors and syncs, preventing
duplicate metadata (issues, PRs, releases) from being created in Gitea.
2025-10-30 10:58:48 +05:30
Arunavo Ray
5a77ae5084 v3.8.10 2025-10-30 10:54:56 +05:30
ARUNAVO RAY
dcb5bd80e3 Merge pull request #138 from RayLabsHQ/issue-132-org-repo-duplicates 2025-10-30 07:11:34 +05:30
Arunavo Ray
3b8fc99f06 workaround to get rid of unknown/unknown in OS arch 2025-10-29 22:01:40 +05:30
Arunavo Ray
bda8d10f10 ci: build arm64 images in PR pipeline 2025-10-29 21:51:37 +05:30
Arunavo Ray
0fe7b433d6 added missing hero_logo.png 2025-10-27 19:43:00 +05:30
Arunavo Ray
8d96e176b4 fix: prevent duplicate orgs and repos 2025-10-27 08:44:45 +05:30
Arunavo Ray
af9bc861cf fixed: Sort order in releases #129 2025-10-27 07:54:38 +05:30
ARUNAVO RAY
ab4bbea9fd Merge pull request #136 from RayLabsHQ/fix/metadata-sync-config-change
fix: sync metadata after config toggles
2025-10-27 07:45:12 +05:30
ARUNAVO RAY
fbd4b3739e Merge pull request #137 from RayLabsHQ/docs/authentik-oidc-notes
Added basic docs on SSO/OIDC
2025-10-26 19:54:53 +05:30
Arunavo Ray
395e71164f Added basic docs on SSO/OIDC 2025-10-26 19:52:44 +05:30
Arunavo Ray
99c277e2ee v3.8.10 | Fixed SSO issues 2025-10-26 19:06:36 +05:30
ARUNAVO RAY
9287e0d29b Merge pull request #135 from RayLabsHQ/fix/authentik-issuer-mismatch
auth: preserve issuer formatting for OIDC
2025-10-26 19:05:54 +05:30
Arunavo Ray
f2f2bafc39 "better-auth": "1.4.0-beta.13" 2025-10-26 18:37:06 +05:30
Arunavo Ray
5876198b5e Added missing DB fields 2025-10-26 18:36:20 +05:30
Arunavo Ray
e46bf381c7 auth: trust email verification from sso providers 2025-10-26 08:45:47 +05:30
Arunavo Ray
3bf0ccf207 fix: sync metadata after config toggles 2025-10-26 08:41:28 +05:30
Arunavo Ray
e41b4ffc56 auth: preserve issuer formatting for OIDC 2025-10-26 07:49:42 +05:30
Arunavo Ray
a9dd646573 v3.8.9 2025-10-25 09:04:14 +05:30
ARUNAVO RAY
e2160aabcd Merge pull request #130 from bwees/main 2025-10-25 07:24:41 +05:30
Brandon Wees
5d085e02bf fix: rename repo count in dashboard 2025-10-24 15:45:29 -05:00
51 changed files with 9387 additions and 261 deletions

1
.envrc Normal file

@@ -0,0 +1 @@
use flake

View File

@@ -101,26 +101,30 @@ jobs:
# Build and push Docker image
- name: Build and push Docker image
id: build-and-push
uses: docker/build-push-action@v5
uses: docker/build-push-action@v6
with:
context: .
platforms: ${{ github.event_name == 'pull_request' && 'linux/amd64' || 'linux/amd64,linux/arm64' }}
platforms: linux/amd64,linux/arm64
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
provenance: false # Disable provenance to avoid unknown/unknown
sbom: false # Disable sbom to avoid unknown/unknown
# Load image locally for security scanning (PRs only)
- name: Load image for scanning
if: github.event_name == 'pull_request'
uses: docker/build-push-action@v5
uses: docker/build-push-action@v6
with:
context: .
platforms: linux/amd64
load: true
tags: gitea-mirror:scan
cache-from: type=gha
provenance: false # Disable provenance to avoid unknown/unknown
sbom: false # Disable sbom to avoid unknown/unknown
# Wait for image to be available in registry
- name: Wait for image availability
@@ -169,8 +173,8 @@ jobs:
- BETTER_AUTH_TRUSTED_ORIGINS=http://localhost:4321
\`\`\`
> 💡 **Note:** PR images are tagged as \`pr-<number>\` and only built for \`linux/amd64\` to speed up CI.
> Production images (\`latest\`, version tags) are multi-platform (\`linux/amd64\`, \`linux/arm64\`).
> 💡 **Note:** PR images are tagged as \`pr-<number>\` and built for both \`linux/amd64\` and \`linux/arm64\`.
> Production images (\`latest\`, version tags) use the same multi-platform set.
---
📦 View in [GitHub Packages](https://github.com/${{ github.repository }}/pkgs/container/gitea-mirror)`;

41
.github/workflows/nix-build.yml vendored Normal file

@@ -0,0 +1,41 @@
name: Nix Build and Cache
on:
push:
branches: [main]
tags:
- 'v*'
pull_request:
branches: [main]
jobs:
build:
strategy:
matrix:
os: [ubuntu-latest, macos-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v24
with:
extra_nix_config: |
experimental-features = nix-command flakes
- uses: cachix/cachix-action@v12
with:
name: gitea-mirror # Your cache name
authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
- name: Build package
run: nix build --print-build-logs
- name: Check flake
run: nix flake check
- name: Test run (dry run)
run: |
# Just verify the binary exists and is executable
test -x ./result/bin/gitea-mirror
./result/bin/gitea-mirror --version || echo "Version check skipped"

5
.gitignore vendored

@@ -32,3 +32,8 @@ certs/*.pem
certs/*.cer
!certs/README.md
# Nix build artifacts
result
result-*
.direnv/

193
DISTRIBUTION_SUMMARY.md Normal file

@@ -0,0 +1,193 @@
# Nix Distribution - Ready to Use! 🎉
## Current Status: ✅ WORKS NOW
Your Nix package is **already distributable**! Users can run it directly from GitHub without any additional setup on your end.
## How Users Will Use It
### Simple: Just Run From GitHub
```bash
nix run --extra-experimental-features 'nix-command flakes' github:RayLabsHQ/gitea-mirror
```
That's it! No releases, no CI, no infrastructure needed. It works right now.
---
## What Happens When They Run This?
1. **Nix fetches** your repo from GitHub
2. **Nix reads** `flake.nix` and `flake.lock`
3. **Nix builds** the package on their machine
4. **Nix runs** the application
5. **Result cached** in `/nix/store` for reuse
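For example, you can see the store path that gets reused on subsequent runs (assumes flakes are enabled, otherwise add the `--extra-experimental-features` flag as above):
```bash
# Build (or reuse the cached build) and print the resulting /nix/store path
nix build --print-out-paths github:RayLabsHQ/gitea-mirror
# Running the same command again is instant because the store path already exists
```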
---
## Do You Need CI or Releases?
### For Basic Usage: **NO**
Users can already use it from GitHub. No CI or releases required.
### For Better UX: **Recommended**
Set up binary caching so users don't compile from source.
---
## Next Steps (Optional but Recommended)
### Option 1: Add Binary Cache (5 minutes)
**Why:** Users download pre-built binaries instead of compiling (much faster!)
**How:**
1. Create free account at https://cachix.org/
2. Create cache named `gitea-mirror`
3. Add GitHub secret: `CACHIX_AUTH_TOKEN`
4. GitHub Actions workflow already created at `.github/workflows/nix-build.yml`
5. Add to your docs:
```bash
# Users run once
cachix use gitea-mirror
# Then they get fast binary downloads
nix run github:RayLabsHQ/gitea-mirror
```
### Option 2: Release Versioning (2 minutes)
**Why:** Users can pin to specific versions
**How:**
```bash
# When ready to release
git tag v3.8.11
git push origin v3.8.11
# Users can then pin to this version
nix run github:RayLabsHQ/gitea-mirror/v3.8.11
```
No additional CI needed - tags work automatically with flakes!
### Option 3: Submit to nixpkgs (Long Term)
**Why:** Maximum discoverability and trust
**When:** After package is stable and well-tested
**How:** Submit PR to https://github.com/NixOS/nixpkgs
---
## Files Created
### Essential (Already Working)
- ✅ `flake.nix` - Package definition
- ✅ `flake.lock` - Dependency lock file
- ✅ `.envrc` - direnv integration
### Documentation
- ✅ `NIX.md` - Quick reference for users
- ✅ `docs/NIX_DEPLOYMENT.md` - Complete deployment guide
- ✅ `docs/NIX_DISTRIBUTION.md` - Distribution guide for you (maintainer)
- ✅ `README.md` - Updated with Nix instructions
### CI (Optional, Already Set Up)
- ✅ `.github/workflows/nix-build.yml` - Builds + caches to Cachix
### Updated
- ✅ `.gitignore` - Added Nix artifacts
---
## Comparison: Your Distribution Options
| Setup | Time | User Experience | What You Need |
|-------|------|----------------|---------------|
| **Direct GitHub** | 0 min ✅ | Slow (build from source) | Nothing! Works now |
| **+ Cachix** | 5 min | Fast (binary download) | Cachix account + token |
| **+ Git Tags** | 2 min | Versionable | Just push tags |
| **+ nixpkgs** | Hours | Official/Trusted | PR review process |
**Recommendation:** Start with Direct GitHub (already works!), add Cachix this week for better UX.
---
## Testing Your Distribution
You can test it right now:
```bash
# Test direct GitHub usage
nix run --extra-experimental-features 'nix-command flakes' github:RayLabsHQ/gitea-mirror
# Test with specific commit
nix run github:RayLabsHQ/gitea-mirror/$(git rev-parse HEAD)
# Validate flake
nix flake check
```
---
## User Documentation Locations
Users will find instructions in:
1. **README.md** - Installation section (already updated)
2. **NIX.md** - Quick reference
3. **docs/NIX_DEPLOYMENT.md** - Detailed guide
All docs include the correct commands with the `--extra-experimental-features` flag.
---
## When to Release New Versions
### For Git Tag Releases:
```bash
# 1. Update version in package.json
vim package.json
# 2. Update version in flake.nix (line 17)
vim flake.nix # version = "3.8.12";
# 3. Commit and tag
git add package.json flake.nix
git commit -m "chore: bump version to v3.8.12"
git tag v3.8.12
git push origin main
git push origin v3.8.12
```
Users can then use: `nix run github:RayLabsHQ/gitea-mirror/v3.8.12`
### No Release Needed For:
- Bug fixes
- Small changes
- Continuous updates
Users can always use latest from main: `nix run github:RayLabsHQ/gitea-mirror`
---
## Summary
**✅ Ready to distribute RIGHT NOW**
- Just commit and push your `flake.nix`
- Users can run directly from GitHub
- No CI, releases, or infrastructure required
**🚀 Recommended next: Add Cachix (5 minutes)**
- Much better user experience
- Workflow already created
- Free for public projects
**📦 Optional later: Submit to nixpkgs**
- Maximum discoverability
- Official Nix repository
- Do this once package is stable
See `docs/NIX_DISTRIBUTION.md` for complete details!

189
NIX.md Normal file

@@ -0,0 +1,189 @@
# Nix Deployment Quick Reference
## TL;DR
```bash
# From GitHub (no clone needed!)
nix run --extra-experimental-features 'nix-command flakes' github:RayLabsHQ/gitea-mirror
# Or from local clone
nix run --extra-experimental-features 'nix-command flakes' .#gitea-mirror
```
Secrets auto-generate, database auto-initializes, and the web UI starts at http://localhost:4321.
**Note:** If you have flakes enabled in your Nix config, you can omit `--extra-experimental-features 'nix-command flakes'`.
---
## Installation Options
### 1. Run Without Installing (from GitHub)
```bash
# Latest version from main branch
nix run --extra-experimental-features 'nix-command flakes' github:RayLabsHQ/gitea-mirror
# Pin to specific version
nix run github:RayLabsHQ/gitea-mirror/v3.8.11
```
### 2. Install to Profile
```bash
# Install from GitHub
nix profile install --extra-experimental-features 'nix-command flakes' github:RayLabsHQ/gitea-mirror
# Run the installed binary
gitea-mirror
```
### 3. Use Local Clone
```bash
# Clone and run
git clone https://github.com/RayLabsHQ/gitea-mirror.git
cd gitea-mirror
nix run --extra-experimental-features 'nix-command flakes' .#gitea-mirror
```
### 4. NixOS System Service
```nix
# configuration.nix
{
inputs.gitea-mirror.url = "github:RayLabsHQ/gitea-mirror";
services.gitea-mirror = {
enable = true;
betterAuthUrl = "https://mirror.example.com"; # For production
openFirewall = true;
};
}
```
### 5. Development (Local Clone)
```bash
nix develop --extra-experimental-features 'nix-command flakes'
# or
direnv allow # Handles experimental features automatically
```
---
## Enable Flakes Permanently (Recommended)
To avoid typing `--extra-experimental-features` every time, add to `~/.config/nix/nix.conf`:
```
experimental-features = nix-command flakes
```
---
## What Gets Auto-Generated?
On first run, the wrapper automatically:
1. Creates `~/.local/share/gitea-mirror/` (or `$DATA_DIR`)
2. Generates `BETTER_AUTH_SECRET` → `.better_auth_secret`
3. Generates `ENCRYPTION_SECRET` → `.encryption_secret`
4. Initializes SQLite database
5. Runs startup recovery and repair scripts
6. Starts the application
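As a quick sanity check, the data directory should then contain the generated secrets and database (file names as described above; default `DATA_DIR` assumed):
```bash
ls -a ~/.local/share/gitea-mirror/
# Expected entries include:
#   .better_auth_secret   .encryption_secret   gitea-mirror.db
```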
---
## Key Commands
```bash
# Database management
gitea-mirror-db init # Initialize database
gitea-mirror-db check # Health check
gitea-mirror-db fix # Fix issues
# Development (add --extra-experimental-features 'nix-command flakes' if needed)
nix develop # Enter dev shell
nix build # Build package
nix flake check # Validate flake
nix flake update # Update dependencies
```
---
## Environment Variables
All vars from `docker-compose.alt.yml` are supported:
```bash
DATA_DIR="$HOME/.local/share/gitea-mirror"
PORT=4321
HOST="0.0.0.0"
BETTER_AUTH_URL="http://localhost:4321"
# Secrets (auto-generated if not set)
BETTER_AUTH_SECRET=auto-generated
ENCRYPTION_SECRET=auto-generated
# Concurrency (for perfect ordering, set both to 1)
MIRROR_ISSUE_CONCURRENCY=3
MIRROR_PULL_REQUEST_CONCURRENCY=5
```
---
## NixOS Module Options
```nix
services.gitea-mirror = {
enable = true;
package = ...; # Override package
dataDir = "/var/lib/gitea-mirror"; # Data location
user = "gitea-mirror"; # Service user
group = "gitea-mirror"; # Service group
host = "0.0.0.0"; # Bind address
port = 4321; # Listen port
betterAuthUrl = "http://..."; # External URL
betterAuthTrustedOrigins = "..."; # CORS origins
mirrorIssueConcurrency = 3; # Concurrency
mirrorPullRequestConcurrency = 5; # Concurrency
environmentFile = null; # Optional secrets file
openFirewall = true; # Open firewall
};
```
---
## Comparison: Docker vs Nix
| Feature | Docker | Nix |
|---------|--------|-----|
| **Config Required** | BETTER_AUTH_SECRET | None (auto-generated) |
| **Startup** | `docker-compose up` | `nix run .#gitea-mirror` |
| **Service** | Docker daemon | systemd (NixOS) |
| **Updates** | `docker pull` | `nix flake update` |
| **Reproducible** | Image-based | Hash-based |
---
## Full Documentation
- **[docs/NIX_DEPLOYMENT.md](docs/NIX_DEPLOYMENT.md)** - Complete deployment guide
- NixOS module configuration
- Home Manager integration
- Production deployment examples
- Migration from Docker
- Troubleshooting guide
- **[docs/NIX_DISTRIBUTION.md](docs/NIX_DISTRIBUTION.md)** - Distribution guide for maintainers
- How users consume the package
- Setting up binary cache (Cachix)
- Releasing new versions
- Submitting to nixpkgs
---
## Key Features
- **Zero-config deployment** - Runs immediately without setup
- **Auto-secret generation** - Secure secrets created and persisted
- **Startup recovery** - Handles interrupted jobs automatically
- **Graceful shutdown** - Proper signal handling
- **Health checks** - Built-in monitoring support
- **Security hardening** - NixOS module includes systemd protections
- **Docker parity** - Same behavior as `docker-compose.alt.yml`

View File

@@ -150,6 +150,38 @@ bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/Proxmo
See the [Proxmox VE Community Scripts](https://community-scripts.github.io/ProxmoxVE/scripts?id=gitea-mirror) for more details.
### Nix/NixOS
Zero-configuration deployment with Nix:
```bash
# Run immediately - no setup needed!
nix run --extra-experimental-features 'nix-command flakes' github:RayLabsHQ/gitea-mirror
# Or build and run locally
nix build --extra-experimental-features 'nix-command flakes'
./result/bin/gitea-mirror
# Or install to profile
nix profile install --extra-experimental-features 'nix-command flakes' github:RayLabsHQ/gitea-mirror
gitea-mirror
```
**NixOS users** - add to your configuration:
```nix
{
inputs.gitea-mirror.url = "github:RayLabsHQ/gitea-mirror";
services.gitea-mirror = {
enable = true;
betterAuthUrl = "https://mirror.example.com";
openFirewall = true;
};
}
```
Secrets auto-generate, database auto-initializes. See [NIX.md](NIX.md) for quick reference or [docs/NIX_DEPLOYMENT.md](docs/NIX_DEPLOYMENT.md) for full documentation.
### Manual Installation
```bash
@@ -326,6 +358,8 @@ Enable users to sign in with external identity providers like Google, Azure AD,
https://your-domain.com/api/auth/sso/callback/{provider-id}
```
Need help? The [SSO & OIDC guide](docs/SSO-OIDC-SETUP.md) now includes a working Authentik walkthrough plus troubleshooting tips. If you upgraded from a version earlier than v3.8.10 and see `TypeError … url.startsWith` after the callback, delete the old provider and add it again using the Discover button (see [#73](https://github.com/RayLabsHQ/gitea-mirror/issues/73) and [#122](https://github.com/RayLabsHQ/gitea-mirror/issues/122)).
### 3. Header Authentication (Reverse Proxy)
Perfect for automatic authentication when using reverse proxies like Authentik, Authelia, or Traefik Forward Auth.

View File

@@ -36,7 +36,7 @@
"@types/react-dom": "^19.2.2",
"astro": "^5.14.8",
"bcryptjs": "^3.0.2",
"better-auth": "1.4.0-beta.12",
"better-auth": "1.4.0-beta.13",
"buffer": "^6.0.3",
"canvas-confetti": "^1.9.3",
"class-variance-authority": "^0.7.1",
@@ -150,11 +150,11 @@
"@babel/types": ["@babel/types@7.28.4", "", { "dependencies": { "@babel/helper-string-parser": "^7.27.1", "@babel/helper-validator-identifier": "^7.27.1" } }, "sha512-bkFqkLhh3pMBUQQkpVgWDWq/lqzc2678eUyDlTBhRqhCHFguYYGM0Efga7tYk4TogG/3x0EEl66/OQ+WGbWB/Q=="],
"@better-auth/core": ["@better-auth/core@1.4.0-beta.12", "", { "dependencies": { "zod": "^4.1.5" }, "peerDependencies": { "@better-auth/utils": "0.3.0", "@better-fetch/fetch": "1.1.18", "better-call": "1.0.24", "better-sqlite3": "^12.4.1", "jose": "^6.1.0", "kysely": "^0.28.5", "nanostores": "^1.0.1" } }, "sha512-2GisAGuSVZS4gtnwP5Owk3RyC6GevZe9zcODTrtbwRCvBTrHUmu0j6bcklK9uNG8DaWDmzCK1+VGA5qIHzg5Pw=="],
"@better-auth/core": ["@better-auth/core@1.4.0-beta.13", "", { "dependencies": { "zod": "^4.1.5" }, "peerDependencies": { "@better-auth/utils": "0.3.0", "@better-fetch/fetch": "1.1.18", "better-call": "1.0.24", "jose": "^6.1.0", "kysely": "^0.28.5", "nanostores": "^1.0.1" } }, "sha512-EGySsNv6HQYnlRQDIa7otIMrwFoC0gGLxBum9lC6C3wAsF4l4pn/ECcdIriFpc9ewLb8mGkeMSpvjVBUBND6ew=="],
"@better-auth/sso": ["@better-auth/sso@1.4.0-beta.12", "", { "dependencies": { "@better-fetch/fetch": "1.1.18", "fast-xml-parser": "^5.2.5", "jose": "^6.1.0", "oauth2-mock-server": "^7.2.1", "samlify": "^2.10.1", "zod": "^4.1.5" }, "peerDependencies": { "better-auth": "1.4.0-beta.12" } }, "sha512-iuRuy59J3yXQihZJ34rqYClWyuVjSkxuBkdFblccKbOhNy7pmRO1lfmBMpyeth3ET5Cp0PDVV/z1XBbDcQp0LA=="],
"@better-auth/telemetry": ["@better-auth/telemetry@1.4.0-beta.12", "", { "dependencies": { "@better-auth/utils": "0.3.0", "@better-fetch/fetch": "1.1.18" }, "peerDependencies": { "@better-auth/core": "1.4.0-beta.12" } }, "sha512-pQ5HITRGXMHQPcPCDnz0xlxFqqxvpD4kQMvY6cdt1vDsPVePHAj9R3S318XEfaw3NAgtw3af/wCN6eBt2u4Kew=="],
"@better-auth/telemetry": ["@better-auth/telemetry@1.4.0-beta.13", "", { "dependencies": { "@better-auth/utils": "0.3.0", "@better-fetch/fetch": "1.1.18" }, "peerDependencies": { "@better-auth/core": "1.4.0-beta.13" } }, "sha512-910f+APALhhD79TiujzXp85Pnd2M3TlcTgBfiYF+mk3ouIkBJkl2N6D2ElcgwfiNTg50cFuTkP3AFPYioz8Arw=="],
"@better-auth/utils": ["@better-auth/utils@0.3.0", "", {}, "sha512-W+Adw6ZA6mgvnSnhOki270rwJ42t4XzSK6YWGF//BbVXL6SwCLWfyzBc1lN2m/4RM28KubdBKQ4X5VMoLRNPQw=="],
@@ -698,7 +698,7 @@
"before-after-hook": ["before-after-hook@4.0.0", "", {}, "sha512-q6tR3RPqIB1pMiTRMFcZwuG5T8vwp+vUvEG0vuI6B+Rikh5BfPp2fQ82c925FOs+b0lcFQ8CFrL+KbilfZFhOQ=="],
"better-auth": ["better-auth@1.4.0-beta.12", "", { "dependencies": { "@better-auth/core": "1.4.0-beta.12", "@better-auth/telemetry": "1.4.0-beta.12", "@better-auth/utils": "0.3.0", "@better-fetch/fetch": "1.1.18", "@noble/ciphers": "^2.0.0", "@noble/hashes": "^2.0.0", "@simplewebauthn/browser": "^13.1.2", "@simplewebauthn/server": "^13.1.2", "better-call": "1.0.24", "defu": "^6.1.4", "jose": "^6.1.0", "kysely": "^0.28.5", "nanostores": "^1.0.1", "zod": "^4.1.5" } }, "sha512-IvrSBmQkHgOinDh6JyJCoKwbMPmHpkmt98/0hBU9Nc0s7Y7u72AOx1Z35J2dRQxxX4SzvFQ9pHqlV6wPnm72Ww=="],
"better-auth": ["better-auth@1.4.0-beta.13", "", { "dependencies": { "@better-auth/core": "1.4.0-beta.13", "@better-auth/telemetry": "1.4.0-beta.13", "@better-auth/utils": "0.3.0", "@better-fetch/fetch": "1.1.18", "@noble/ciphers": "^2.0.0", "@noble/hashes": "^2.0.0", "@simplewebauthn/browser": "^13.1.2", "@simplewebauthn/server": "^13.1.2", "better-call": "1.0.24", "defu": "^6.1.4", "jose": "^6.1.0", "kysely": "^0.28.5", "nanostores": "^1.0.1", "zod": "^4.1.5" } }, "sha512-VOzbsCldupk2AdNfzDmpCVajX83nwITX8S9I8TdEUURgr3kB/CDVrsN6S8t0AClMnGgB4XaeKiXUNN30CCG4aA=="],
"better-call": ["better-call@1.0.24", "", { "dependencies": { "@better-auth/utils": "^0.3.0", "@better-fetch/fetch": "^1.1.4", "rou3": "^0.5.1", "set-cookie-parser": "^2.7.1", "uncrypto": "^0.1.3" } }, "sha512-iGqL29cstPp4xLD2MjKL1EmyAqQHjYS+cBMt4W27rPs3vf+kuqkVPA0NYaf7JciBOzVsJdNj4cbZWXC5TardWQ=="],

483
docs/NIX_DEPLOYMENT.md Normal file

@@ -0,0 +1,483 @@
# Nix Deployment Guide
This guide covers deploying Gitea Mirror using Nix flakes. The Nix deployment follows the same minimal configuration philosophy as `docker-compose.alt.yml` - secrets are auto-generated, and everything else can be configured via the web UI.
## Prerequisites
- Nix 2.4+ installed
- For NixOS module: NixOS 23.05+
### Enable Flakes (Recommended)
To enable flakes permanently and avoid typing flags, add to `/etc/nix/nix.conf` or `~/.config/nix/nix.conf`:
```
experimental-features = nix-command flakes
```
**Note:** If you don't enable flakes globally, add `--extra-experimental-features 'nix-command flakes'` to all nix commands shown below.
## Quick Start (Zero Configuration!)
### Run Immediately - No Setup Required
```bash
# Run directly from the flake (local)
nix run --extra-experimental-features 'nix-command flakes' .#gitea-mirror
# Or from GitHub (once published)
nix run --extra-experimental-features 'nix-command flakes' github:RayLabsHQ/gitea-mirror
# If you have flakes enabled globally, simply:
nix run .#gitea-mirror
```
That's it! On first run:
- Secrets (`BETTER_AUTH_SECRET` and `ENCRYPTION_SECRET`) are auto-generated
- Database is automatically created and initialized
- Startup recovery and repair scripts run automatically
- Access the web UI at http://localhost:4321
Everything else (GitHub credentials, Gitea settings, mirror options) is configured through the web interface after signup.
### Development Environment
```bash
# Enter development shell with all dependencies
nix develop --extra-experimental-features 'nix-command flakes'
# Or use direnv for automatic environment loading (handles flags automatically)
echo "use flake" > .envrc
direnv allow
```
### Build and Install
```bash
# Build the package
nix build --extra-experimental-features 'nix-command flakes'
# Run the built package
./result/bin/gitea-mirror
# Install to your profile
nix profile install --extra-experimental-features 'nix-command flakes' .#gitea-mirror
```
## What Happens on First Run?
Following the same pattern as the Docker deployment, the Nix package automatically:
1. **Creates data directory**: `~/.local/share/gitea-mirror` (or `$DATA_DIR`)
2. **Generates secrets** (stored securely in data directory):
- `BETTER_AUTH_SECRET` - Session authentication (32-char hex)
- `ENCRYPTION_SECRET` - Token encryption (48-char base64)
3. **Initializes database**: SQLite database with Drizzle migrations
4. **Runs startup scripts**:
- Environment configuration loader
- Crash recovery for interrupted jobs
- Repository status repair
5. **Starts the application** with graceful shutdown handling
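If you want the data somewhere other than the default location, a minimal sketch of overriding the defaults before the first run (both variables are listed in the Environment Variables section below):
```bash
# Run with a custom data directory and port; secrets and the database
# are then created under /srv/gitea-mirror instead of ~/.local/share
DATA_DIR=/srv/gitea-mirror PORT=8080 gitea-mirror
```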
## NixOS Module - Minimal Deployment
### Simplest Possible Configuration
Add to your NixOS configuration (`/etc/nixos/configuration.nix`):
```nix
{
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
gitea-mirror.url = "github:RayLabsHQ/gitea-mirror";
};
outputs = { nixpkgs, gitea-mirror, ... }: {
nixosConfigurations.your-hostname = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
gitea-mirror.nixosModules.default
{
# That's it! Just enable the service
services.gitea-mirror.enable = true;
}
];
};
};
}
```
Apply with:
```bash
sudo nixos-rebuild switch
```
Access at http://localhost:4321, sign up (first user is admin), and configure everything via the web UI.
### Production Configuration
For production with custom domain and firewall:
```nix
{
services.gitea-mirror = {
enable = true;
host = "0.0.0.0";
port = 4321;
betterAuthUrl = "https://mirror.example.com";
betterAuthTrustedOrigins = "https://mirror.example.com";
openFirewall = true;
};
# Optional: Use with nginx reverse proxy
services.nginx = {
enable = true;
virtualHosts."mirror.example.com" = {
locations."/" = {
proxyPass = "http://127.0.0.1:4321";
proxyWebsockets = true;
};
enableACME = true;
forceSSL = true;
};
};
}
```
### Advanced: Manual Secret Management
If you prefer to manage secrets manually (e.g., with sops-nix or agenix):
1. Create a secrets file:
```bash
# /var/lib/gitea-mirror/secrets.env
BETTER_AUTH_SECRET=your-32-character-minimum-secret-key-here
ENCRYPTION_SECRET=your-encryption-secret-here
```
2. Reference it in your configuration:
```nix
{
services.gitea-mirror = {
enable = true;
environmentFile = "/var/lib/gitea-mirror/secrets.env";
};
}
```
### Full Configuration Options
```nix
{
services.gitea-mirror = {
enable = true;
package = gitea-mirror.packages.x86_64-linux.default; # Override package
dataDir = "/var/lib/gitea-mirror";
user = "gitea-mirror";
group = "gitea-mirror";
host = "0.0.0.0";
port = 4321;
betterAuthUrl = "https://mirror.example.com";
betterAuthTrustedOrigins = "https://mirror.example.com";
# Concurrency controls (match docker-compose.alt.yml)
mirrorIssueConcurrency = 3; # Set to 1 for perfect chronological order
mirrorPullRequestConcurrency = 5; # Set to 1 for perfect chronological order
environmentFile = null; # Optional secrets file
openFirewall = true;
};
}
```
## Service Management (NixOS)
```bash
# Start the service
sudo systemctl start gitea-mirror
# Stop the service
sudo systemctl stop gitea-mirror
# Restart the service
sudo systemctl restart gitea-mirror
# Check status
sudo systemctl status gitea-mirror
# View logs
sudo journalctl -u gitea-mirror -f
# Health check
curl http://localhost:4321/api/health
```
## Environment Variables
All variables from `docker-compose.alt.yml` are supported:
```bash
# === AUTO-GENERATED (Don't set unless you want specific values) ===
BETTER_AUTH_SECRET # Auto-generated, stored in data dir
ENCRYPTION_SECRET # Auto-generated, stored in data dir
# === CORE SETTINGS (Have good defaults) ===
DATA_DIR="$HOME/.local/share/gitea-mirror"
DATABASE_URL="file:$DATA_DIR/gitea-mirror.db"
HOST="0.0.0.0"
PORT="4321"
NODE_ENV="production"
# === BETTER AUTH (Override for custom domains) ===
BETTER_AUTH_URL="http://localhost:4321"
BETTER_AUTH_TRUSTED_ORIGINS="http://localhost:4321"
PUBLIC_BETTER_AUTH_URL="http://localhost:4321"
# === CONCURRENCY CONTROLS ===
MIRROR_ISSUE_CONCURRENCY=3 # Default: 3 (set to 1 for perfect order)
MIRROR_PULL_REQUEST_CONCURRENCY=5 # Default: 5 (set to 1 for perfect order)
# === CONFIGURE VIA WEB UI (Not needed at startup) ===
# GitHub credentials, Gitea settings, mirror options, scheduling, etc.
# All configured after signup through the web interface
```
## Database Management
The Nix package includes a database management helper:
```bash
# Initialize database (done automatically on first run)
gitea-mirror-db init
# Check database health
gitea-mirror-db check
# Fix database issues
gitea-mirror-db fix
# Reset users
gitea-mirror-db reset-users
```
## Home Manager Integration
For single-user deployments:
```nix
{ config, pkgs, ... }:
let
gitea-mirror = (import (fetchTarball "https://github.com/RayLabsHQ/gitea-mirror/archive/main.tar.gz")).packages.${pkgs.system}.default;
in {
home.packages = [ gitea-mirror ];
# Optional: Run as user service
systemd.user.services.gitea-mirror = {
Unit = {
Description = "Gitea Mirror Service";
After = [ "network.target" ];
};
Service = {
Type = "simple";
ExecStart = "${gitea-mirror}/bin/gitea-mirror";
Restart = "always";
Environment = [
"DATA_DIR=%h/.local/share/gitea-mirror"
"HOST=127.0.0.1"
"PORT=4321"
];
};
Install = {
WantedBy = [ "default.target" ];
};
};
}
```
## Docker Image from Nix (Optional)
You can also use Nix to create a Docker image:
```nix
# Add to flake.nix packages section
dockerImage = pkgs.dockerTools.buildLayeredImage {
name = "gitea-mirror";
tag = "latest";
contents = [ self.packages.${system}.default pkgs.cacert pkgs.openssl ];
config = {
Cmd = [ "${self.packages.${system}.default}/bin/gitea-mirror" ];
ExposedPorts = { "4321/tcp" = {}; };
Env = [
"DATA_DIR=/data"
"DATABASE_URL=file:/data/gitea-mirror.db"
];
Volumes = { "/data" = {}; };
};
};
```
Build and load:
```bash
nix build --extra-experimental-features 'nix-command flakes' .#dockerImage
docker load < result
docker run -p 4321:4321 -v gitea-mirror-data:/data gitea-mirror:latest
```
## Comparison: Docker vs Nix
Both deployment methods follow the same philosophy:
| Feature | Docker Compose | Nix |
|---------|---------------|-----|
| **Configuration** | Minimal (only BETTER_AUTH_SECRET) | Zero config (auto-generated) |
| **Secret Generation** | Auto-generated & persisted | Auto-generated & persisted |
| **Database Init** | Automatic on first run | Automatic on first run |
| **Startup Scripts** | Runs recovery/repair/env-config | Runs recovery/repair/env-config |
| **Graceful Shutdown** | Signal handling in entrypoint | Signal handling in wrapper |
| **Health Check** | Docker healthcheck | systemd timer (optional) |
| **Updates** | `docker pull` | `nix flake update && nixos-rebuild` |
## Troubleshooting
### Check Auto-Generated Secrets
```bash
# For standalone
cat ~/.local/share/gitea-mirror/.better_auth_secret
cat ~/.local/share/gitea-mirror/.encryption_secret
# For NixOS service
sudo cat /var/lib/gitea-mirror/.better_auth_secret
sudo cat /var/lib/gitea-mirror/.encryption_secret
```
### Database Issues
```bash
# Check if database exists
ls -la ~/.local/share/gitea-mirror/gitea-mirror.db
# Reinitialize (deletes all data!)
rm ~/.local/share/gitea-mirror/gitea-mirror.db
gitea-mirror-db init
```
### Permission Issues (NixOS)
```bash
sudo chown -R gitea-mirror:gitea-mirror /var/lib/gitea-mirror
sudo chmod 700 /var/lib/gitea-mirror
```
### Port Already in Use
```bash
# Change port
export PORT=8080
gitea-mirror
# Or in NixOS config
services.gitea-mirror.port = 8080;
```
### View Startup Logs
```bash
# Standalone (verbose output on console)
gitea-mirror
# NixOS service
sudo journalctl -u gitea-mirror -f --since "5 minutes ago"
```
## Updating
### Standalone Installation
```bash
# Update flake lock
nix flake update --extra-experimental-features 'nix-command flakes'
# Rebuild
nix build --extra-experimental-features 'nix-command flakes'
# Or update profile
nix profile upgrade --extra-experimental-features 'nix-command flakes' gitea-mirror
```
### NixOS
```bash
# Update input
sudo nix flake lock --update-input gitea-mirror --extra-experimental-features 'nix-command flakes'
# Rebuild system
sudo nixos-rebuild switch --flake .#your-hostname
```
## Migration from Docker
To migrate from Docker to Nix while keeping your data:
1. **Stop Docker container:**
```bash
docker-compose -f docker-compose.alt.yml down
```
2. **Copy data directory:**
```bash
# For standalone
cp -r ./data ~/.local/share/gitea-mirror
# For NixOS
sudo cp -r ./data /var/lib/gitea-mirror
sudo chown -R gitea-mirror:gitea-mirror /var/lib/gitea-mirror
```
3. **Copy secrets (if you want to keep them):**
```bash
# Extract from Docker volume
docker run --rm -v gitea-mirror_data:/data alpine \
cat /data/.better_auth_secret > better_auth_secret
docker run --rm -v gitea-mirror_data:/data alpine \
cat /data/.encryption_secret > encryption_secret
# Copy to new location
cp better_auth_secret ~/.local/share/gitea-mirror/.better_auth_secret
cp encryption_secret ~/.local/share/gitea-mirror/.encryption_secret
chmod 600 ~/.local/share/gitea-mirror/.*_secret
```
4. **Start Nix version:**
```bash
gitea-mirror
```
## CI/CD Integration
Example GitHub Actions workflow:
```yaml
name: Build with Nix
on: [push, pull_request]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v24
with:
extra_nix_config: |
experimental-features = nix-command flakes
- uses: cachix/cachix-action@v12
with:
name: gitea-mirror
authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
- run: nix build
- run: nix flake check
# Note: flakes are enabled via extra_nix_config in the install-nix-action step above
```
## Resources
- [Nix Manual](https://nixos.org/manual/nix/stable/)
- [NixOS Options Search](https://search.nixos.org/options)
- [Nix Pills Tutorial](https://nixos.org/guides/nix-pills/)
- [Project Documentation](../README.md)
- [Docker Deployment](../docker-compose.alt.yml) - Equivalent minimal config

352
docs/NIX_DISTRIBUTION.md Normal file

@@ -0,0 +1,352 @@
# Nix Package Distribution Guide
This guide explains how Gitea Mirror is distributed via Nix and how users can consume it.
## Distribution Methods
### Method 1: Direct GitHub Usage (Zero Infrastructure)
**No CI, releases, or setup needed!** Users can consume directly from GitHub:
```bash
# Latest from main branch
nix run --extra-experimental-features 'nix-command flakes' github:RayLabsHQ/gitea-mirror
# Pin to specific commit
nix run github:RayLabsHQ/gitea-mirror/abc123def
# Pin to git tag
nix run github:RayLabsHQ/gitea-mirror/v3.8.11
```
**How it works:**
1. Nix fetches the repository from GitHub
2. Nix reads `flake.nix` and `flake.lock`
3. Nix builds the package locally on the user's machine
4. Package is cached in `/nix/store` for reuse
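To see what Nix will fetch and which revision it resolves to, without building anything (assumes flakes are enabled):
```bash
# Show the resolved revision, lock info, and inputs of the flake
nix flake metadata github:RayLabsHQ/gitea-mirror
```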
**Pros:**
- Zero infrastructure needed
- Works immediately after pushing code
- Users always get reproducible builds
**Cons:**
- Users must build from source (slower first time)
- Requires build dependencies (Bun, etc.)
---
### Method 2: Binary Cache (Recommended)
Pre-build packages and cache them so users download binaries instead of building:
#### Setup: Cachix (Free for Public Projects)
1. **Create account:** https://cachix.org/
2. **Create cache:** `gitea-mirror` (public)
3. **Add secret to GitHub:** `Settings → Secrets → CACHIX_AUTH_TOKEN`
4. **GitHub Actions builds automatically** (see `.github/workflows/nix-build.yml`)
#### User Experience:
```bash
# First time: Configure cache
cachix use gitea-mirror
# Or add to nix.conf:
# substituters = https://cache.nixos.org https://gitea-mirror.cachix.org
# trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= gitea-mirror.cachix.org-1:YOUR_KEY_HERE
# Then use normally - downloads pre-built binaries!
nix run github:RayLabsHQ/gitea-mirror
```
**Pros:**
- Fast installation (no compilation)
- Reduced bandwidth/CPU for users
- Professional experience
**Cons:**
- Requires Cachix account (free for public)
- Requires CI setup
---
### Method 3: nixpkgs Submission (Official Distribution)
Submit to the official Nix package repository for maximum visibility.
#### Process:
1. **Prepare package** (already done with `flake.nix`)
2. **Test thoroughly**
3. **Submit PR to nixpkgs:** https://github.com/NixOS/nixpkgs
#### User Experience:
```bash
# After acceptance into nixpkgs
nix run nixpkgs#gitea-mirror
# NixOS configuration
environment.systemPackages = [ pkgs.gitea-mirror ];
```
**Pros:**
- Maximum discoverability (official repo)
- Trusted by Nix community
- Included in NixOS search
- Binary caching by cache.nixos.org
**Cons:**
- Submission/review process
- Must follow nixpkgs guidelines
- Updates require PRs
---
## Current Distribution Strategy
### Phase 1: Direct GitHub (Immediate) ✅
Already working! Users can:
```bash
nix run github:RayLabsHQ/gitea-mirror
```
### Phase 2: Binary Cache (Recommended Next)
Set up Cachix for faster installs:
1. Create Cachix cache
2. Add `CACHIX_AUTH_TOKEN` secret to GitHub
3. Workflow already created in `.github/workflows/nix-build.yml`
4. Add instructions to docs
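The user-facing instructions for step 4 can simply repeat the commands from Method 2 above:
```bash
# One-time setup, then binaries come from the cache
cachix use gitea-mirror
nix run github:RayLabsHQ/gitea-mirror
```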
### Phase 3: Version Releases (Optional)
Tag releases for version pinning:
```bash
git tag v3.8.11
git push origin v3.8.11
# Users can then pin:
nix run github:RayLabsHQ/gitea-mirror/v3.8.11
```
### Phase 4: nixpkgs Submission (Long Term)
Once package is stable and well-tested, submit to nixpkgs.
---
## User Documentation
### For Users: How to Install
Add this to your `docs/NIX_DEPLOYMENT.md`:
#### Option 1: Direct Install (No Configuration)
```bash
# Run immediately
nix run --extra-experimental-features 'nix-command flakes' github:RayLabsHQ/gitea-mirror
# Install to profile
nix profile install --extra-experimental-features 'nix-command flakes' github:RayLabsHQ/gitea-mirror
```
#### Option 2: With Binary Cache (Faster)
```bash
# One-time setup
cachix use gitea-mirror
# Then install (downloads pre-built binary)
nix profile install github:RayLabsHQ/gitea-mirror
```
#### Option 3: Pin to Specific Version
```bash
# Pin to git tag
nix run github:RayLabsHQ/gitea-mirror/v3.8.11
# Pin to commit
nix run github:RayLabsHQ/gitea-mirror/abc123def
# Lock in flake.nix
inputs.gitea-mirror.url = "github:RayLabsHQ/gitea-mirror/v3.8.11";
```
#### Option 4: NixOS Configuration
```nix
{
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
gitea-mirror.url = "github:RayLabsHQ/gitea-mirror";
# Or pin to version:
# gitea-mirror.url = "github:RayLabsHQ/gitea-mirror/v3.8.11";
};
outputs = { nixpkgs, gitea-mirror, ... }: {
nixosConfigurations.your-host = nixpkgs.lib.nixosSystem {
modules = [
gitea-mirror.nixosModules.default
{
services.gitea-mirror = {
enable = true;
betterAuthUrl = "https://mirror.example.com";
openFirewall = true;
};
}
];
};
};
}
```
---
## Maintaining the Distribution
### Releasing New Versions
```bash
# 1. Update version in package.json
vim package.json # Update version field
# 2. Update flake.nix version (line 17)
vim flake.nix # Update version = "X.Y.Z";
# 3. Commit changes
git add package.json flake.nix
git commit -m "chore: bump version to vX.Y.Z"
# 4. Create git tag
git tag vX.Y.Z
git push origin main
git push origin vX.Y.Z
# 5. GitHub Actions builds and caches automatically
```
Users can then pin to the new version:
```bash
nix run github:RayLabsHQ/gitea-mirror/vX.Y.Z
```
### Updating Flake Lock
The `flake.lock` file pins all dependencies. Update it periodically:
```bash
# Update all inputs
nix flake update
# Update specific input
nix flake lock --update-input nixpkgs
# Test after update
nix build
nix flake check
# Commit the updated lock file
git add flake.lock
git commit -m "chore: update flake dependencies"
git push
```
---
## Troubleshooting Distribution Issues
### Users Report Build Failures
1. **Check GitHub Actions:** Ensure CI is passing
2. **Test locally:** `nix flake check`
3. **Check flake.lock:** May need update if dependencies changed
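A quick way to reproduce what a user sees is to build from the same GitHub reference they use (assumes flakes are enabled):
```bash
# Build exactly what `nix run github:RayLabsHQ/gitea-mirror` would build, with full logs
nix build --print-build-logs github:RayLabsHQ/gitea-mirror
nix flake check
```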
### Cachix Not Working
1. **Verify cache exists:** https://gitea-mirror.cachix.org
2. **Check GitHub secret:** `CACHIX_AUTH_TOKEN` is set
3. **Review workflow logs:** Ensure build + push succeeded
### Version Pinning Not Working
```bash
# Verify tag exists
git tag -l
# Ensure tag is pushed
git ls-remote --tags origin
# Test specific tag
nix run github:RayLabsHQ/gitea-mirror/v3.8.11
```
---
## Advanced: Custom Binary Cache
If you prefer self-hosting instead of Cachix:
### Option 1: S3-Compatible Storage
```bash
# Generate signing key
nix-store --generate-binary-cache-key cache.example.com cache-priv-key.pem cache-pub-key.pem
# Push to S3
nix copy --to s3://my-nix-cache?region=us-east-1 $(nix-build)
```
Users configure:
```nix
substituters = https://my-bucket.s3.amazonaws.com/nix-cache
trusted-public-keys = cache.example.com:BASE64_PUBLIC_KEY
```
### Option 2: Self-Hosted Nix Store
Run `nix-serve` on your server:
```bash
# On server
nix-serve -p 8080
# Behind nginx/caddy
proxy_pass http://localhost:8080;
```
Users configure:
```nix
substituters = https://cache.example.com
trusted-public-keys = YOUR_KEY
```
---
## Comparison: Distribution Methods
| Method | Setup Time | User Speed | Cost | Discoverability |
|--------|-----------|------------|------|-----------------|
| Direct GitHub | 0 min | Slow (build) | Free | Low |
| Cachix | 5 min | Fast (binary) | Free (public) | Medium |
| nixpkgs | Hours/days | Fast (binary) | Free | High |
| Self-hosted | 30+ min | Fast (binary) | Server cost | Low |
**Recommendation:** Start with **Direct GitHub** (works now), add **Cachix** for better UX (5 min), consider **nixpkgs** later for maximum reach.
---
## Resources
- [Nix Flakes Documentation](https://nixos.wiki/wiki/Flakes)
- [Cachix Documentation](https://docs.cachix.org/)
- [nixpkgs Contributing Guide](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md)
- [Nix Binary Cache Setup](https://nixos.org/manual/nix/stable/package-management/binary-cache-substituter.html)

View File

@@ -81,6 +81,26 @@ Replace `{provider-id}` with your chosen Provider ID.
- Client Secret: [Your Okta Client Secret]
- Click "Discover" to auto-fill endpoints
### Example: Authentik SSO Setup
Working Authentik deployments (see [#134](https://github.com/RayLabsHQ/gitea-mirror/issues/134)) follow these steps:
1. In Authentik, create a new **Application** and OIDC **Provider** (implicit flow works well for testing).
2. Start creating an SSO provider inside Gitea Mirror so you can copy the redirect URL shown (`https://your-domain.com/api/auth/sso/callback/authentik` if you pick `authentik` as your Provider ID).
3. Paste that redirect URL into the Authentik Provider configuration and finish creating the provider.
4. Copy the Authentik issuer URL, client ID, and client secret.
5. Back in Gitea Mirror:
- Issuer URL: the exact value from Authentik (keep any trailing slash Authentik shows).
- Provider ID: match the one you used in step 2.
- Click **Discover** so Gitea Mirror stores the authorization, token, and JWKS endpoints (Authentik publishes them via discovery).
- Domain: enter the email domain you expect to match (e.g. `example.com`).
6. Save the provider and test the login flow.
Notes:
- Make sure `BETTER_AUTH_URL` and (if you serve the UI from multiple origins) `BETTER_AUTH_TRUSTED_ORIGINS` point at the public URL users reach. A mismatch can surface as 500 errors after redirect.
- Authentik must report the user's email as verified (default behavior) so Gitea Mirror can auto-link accounts.
- If you created an Authentik provider before v3.8.10 you should delete it and re-add it after upgrading; older versions saved incomplete endpoint data which leads to the `url.startsWith` error explained in the Troubleshooting section.
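For reference, a matching environment for a deployment served at `https://mirror.example.com` would look like this (values are illustrative):
```bash
BETTER_AUTH_URL=https://mirror.example.com
BETTER_AUTH_TRUSTED_ORIGINS=https://mirror.example.com
```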
## Setting up OIDC Provider
The OIDC Provider feature allows other applications to use Gitea Mirror as their authentication provider.
@@ -165,6 +185,7 @@ When an application requests authentication:
1. **"Invalid origin" error**: Check that your Gitea Mirror URL matches the configured redirect URI
2. **"Provider not found" error**: Ensure the provider is properly configured and enabled
3. **Redirect loop**: Verify the redirect URI in both Gitea Mirror and the SSO provider match exactly
4. **`TypeError: undefined is not an object (evaluating 'url.startsWith')`**: This indicates the stored provider configuration is missing OIDC endpoints. Delete the provider from Gitea Mirror and re-register it using the **Discover** button so authorization/token URLs are saved (see [#73](https://github.com/RayLabsHQ/gitea-mirror/issues/73) and [#122](https://github.com/RayLabsHQ/gitea-mirror/issues/122) for examples).
### OIDC Provider Issues

View File

@@ -0,0 +1,4 @@
ALTER TABLE `accounts` ADD `id_token` text;--> statement-breakpoint
ALTER TABLE `accounts` ADD `access_token_expires_at` integer;--> statement-breakpoint
ALTER TABLE `accounts` ADD `refresh_token_expires_at` integer;--> statement-breakpoint
ALTER TABLE `accounts` ADD `scope` text;

View File

@@ -0,0 +1,18 @@
ALTER TABLE `organizations` ADD `normalized_name` text NOT NULL DEFAULT '';--> statement-breakpoint
UPDATE `organizations` SET `normalized_name` = lower(trim(`name`));--> statement-breakpoint
DELETE FROM `organizations`
WHERE rowid NOT IN (
SELECT MIN(rowid)
FROM `organizations`
GROUP BY `user_id`, `normalized_name`
);--> statement-breakpoint
CREATE UNIQUE INDEX `uniq_organizations_user_normalized_name` ON `organizations` (`user_id`,`normalized_name`);--> statement-breakpoint
ALTER TABLE `repositories` ADD `normalized_full_name` text NOT NULL DEFAULT '';--> statement-breakpoint
UPDATE `repositories` SET `normalized_full_name` = lower(trim(`full_name`));--> statement-breakpoint
DELETE FROM `repositories`
WHERE rowid NOT IN (
SELECT MIN(rowid)
FROM `repositories`
GROUP BY `user_id`, `normalized_full_name`
);--> statement-breakpoint
CREATE UNIQUE INDEX `uniq_repositories_user_normalized_full_name` ON `repositories` (`user_id`,`normalized_full_name`);

View File

@@ -0,0 +1 @@
ALTER TABLE `repositories` ADD `metadata` text;

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -43,6 +43,27 @@
"when": 1757786449446,
"tag": "0005_polite_preak",
"breakpoints": true
},
{
"idx": 6,
"version": "6",
"when": 1761483928546,
"tag": "0006_military_la_nuit",
"breakpoints": true
},
{
"idx": 7,
"version": "6",
"when": 1761534391115,
"tag": "0007_whole_hellion",
"breakpoints": true
},
{
"idx": 8,
"version": "6",
"when": 1761802056073,
"tag": "0008_serious_thena",
"breakpoints": true
}
]
}

61
flake.lock generated Normal file

@@ -0,0 +1,61 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1761672384,
"narHash": "sha256-o9KF3DJL7g7iYMZq9SWgfS1BFlNbsm6xplRjVlOCkXI=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "08dacfca559e1d7da38f3cf05f1f45ee9bfd213c",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

395
flake.nix Normal file

@@ -0,0 +1,395 @@
{
description = "Gitea Mirror - Self-hosted GitHub to Gitea mirroring service";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
flake-utils.url = "github:numtide/flake-utils";
};
outputs = { self, nixpkgs, flake-utils }:
flake-utils.lib.eachDefaultSystem (system:
let
pkgs = nixpkgs.legacyPackages.${system};
# Build the application
gitea-mirror = pkgs.stdenv.mkDerivation {
pname = "gitea-mirror";
version = "3.8.11";
src = ./.;
nativeBuildInputs = with pkgs; [
bun
];
buildInputs = with pkgs; [
sqlite
openssl
];
configurePhase = ''
export HOME=$TMPDIR
export BUN_INSTALL=$TMPDIR/.bun
export PATH=$BUN_INSTALL/bin:$PATH
'';
buildPhase = ''
# Install dependencies
bun install --frozen-lockfile --no-progress
# Build the application
bun run build
'';
installPhase = ''
mkdir -p $out/lib/gitea-mirror
mkdir -p $out/bin
# Copy the built application
cp -r dist $out/lib/gitea-mirror/
cp -r node_modules $out/lib/gitea-mirror/
cp -r scripts $out/lib/gitea-mirror/
cp package.json $out/lib/gitea-mirror/
# Create entrypoint script that matches Docker behavior
cat > $out/bin/gitea-mirror <<'EOF'
#!/usr/bin/env bash
set -e
# === DEFAULT CONFIGURATION ===
# These match docker-compose.alt.yml defaults
export DATA_DIR=''${DATA_DIR:-"$HOME/.local/share/gitea-mirror"}
export DATABASE_URL=''${DATABASE_URL:-"file:$DATA_DIR/gitea-mirror.db"}
export HOST=''${HOST:-"0.0.0.0"}
export PORT=''${PORT:-"4321"}
export NODE_ENV=''${NODE_ENV:-"production"}
# Better Auth configuration
export BETTER_AUTH_URL=''${BETTER_AUTH_URL:-"http://localhost:4321"}
export BETTER_AUTH_TRUSTED_ORIGINS=''${BETTER_AUTH_TRUSTED_ORIGINS:-"http://localhost:4321"}
export PUBLIC_BETTER_AUTH_URL=''${PUBLIC_BETTER_AUTH_URL:-"http://localhost:4321"}
# Concurrency settings (match docker-compose.alt.yml)
export MIRROR_ISSUE_CONCURRENCY=''${MIRROR_ISSUE_CONCURRENCY:-3}
export MIRROR_PULL_REQUEST_CONCURRENCY=''${MIRROR_PULL_REQUEST_CONCURRENCY:-5}
# Create data directory
mkdir -p "$DATA_DIR"
cd $out/lib/gitea-mirror
# === AUTO-GENERATE SECRETS ===
BETTER_AUTH_SECRET_FILE="$DATA_DIR/.better_auth_secret"
ENCRYPTION_SECRET_FILE="$DATA_DIR/.encryption_secret"
# Generate BETTER_AUTH_SECRET if not provided
if [ -z "$BETTER_AUTH_SECRET" ]; then
if [ -f "$BETTER_AUTH_SECRET_FILE" ]; then
echo "Using previously generated BETTER_AUTH_SECRET"
export BETTER_AUTH_SECRET=$(cat "$BETTER_AUTH_SECRET_FILE")
else
echo "Generating a secure random BETTER_AUTH_SECRET"
GENERATED_SECRET=$(${pkgs.openssl}/bin/openssl rand -hex 32)
export BETTER_AUTH_SECRET="$GENERATED_SECRET"
echo "$GENERATED_SECRET" > "$BETTER_AUTH_SECRET_FILE"
chmod 600 "$BETTER_AUTH_SECRET_FILE"
echo " BETTER_AUTH_SECRET generated and saved to $BETTER_AUTH_SECRET_FILE"
fi
fi
# Generate ENCRYPTION_SECRET if not provided
if [ -z "$ENCRYPTION_SECRET" ]; then
if [ -f "$ENCRYPTION_SECRET_FILE" ]; then
echo "Using previously generated ENCRYPTION_SECRET"
export ENCRYPTION_SECRET=$(cat "$ENCRYPTION_SECRET_FILE")
else
echo "Generating a secure random ENCRYPTION_SECRET"
GENERATED_ENCRYPTION_SECRET=$(${pkgs.openssl}/bin/openssl rand -base64 36)
export ENCRYPTION_SECRET="$GENERATED_ENCRYPTION_SECRET"
echo "$GENERATED_ENCRYPTION_SECRET" > "$ENCRYPTION_SECRET_FILE"
chmod 600 "$ENCRYPTION_SECRET_FILE"
echo " ENCRYPTION_SECRET generated and saved to $ENCRYPTION_SECRET_FILE"
fi
fi
# === DATABASE INITIALIZATION ===
DB_PATH=$(echo "$DATABASE_URL" | sed 's|^file:||')
if [ ! -f "$DB_PATH" ]; then
echo "Database not found. It will be created and initialized via Drizzle migrations on first app startup..."
touch "$DB_PATH"
else
echo "Database already exists, Drizzle will check for pending migrations on startup..."
fi
# === STARTUP SCRIPTS ===
# Initialize configuration from environment variables
echo "Checking for environment configuration..."
if [ -f "dist/scripts/startup-env-config.js" ]; then
echo "Loading configuration from environment variables..."
${pkgs.bun}/bin/bun dist/scripts/startup-env-config.js && \
echo " Environment configuration loaded successfully" || \
echo " Environment configuration loading completed with warnings"
fi
# Run startup recovery
echo "Running startup recovery..."
if [ -f "dist/scripts/startup-recovery.js" ]; then
${pkgs.bun}/bin/bun dist/scripts/startup-recovery.js --timeout=30000 && \
echo " Startup recovery completed successfully" || \
echo " Startup recovery completed with warnings"
fi
# Run repository status repair
echo "Running repository status repair..."
if [ -f "dist/scripts/repair-mirrored-repos.js" ]; then
${pkgs.bun}/bin/bun dist/scripts/repair-mirrored-repos.js --startup && \
echo " Repository status repair completed successfully" || \
echo " Repository status repair completed with warnings"
fi
# === SIGNAL HANDLING ===
shutdown_handler() {
echo "🛑 Received shutdown signal, forwarding to application..."
if [ ! -z "$APP_PID" ]; then
kill -TERM "$APP_PID" 2>/dev/null || true
wait "$APP_PID" 2>/dev/null || true
fi
exit 0
}
trap 'shutdown_handler' TERM INT HUP
# === START APPLICATION ===
echo "Starting Gitea Mirror..."
echo "Access the web interface at $BETTER_AUTH_URL"
${pkgs.bun}/bin/bun dist/server/entry.mjs &
APP_PID=$!
wait "$APP_PID"
EOF
chmod +x $out/bin/gitea-mirror
# Create database management helper
cat > $out/bin/gitea-mirror-db <<'EOF'
#!/usr/bin/env bash
export DATA_DIR=''${DATA_DIR:-"$HOME/.local/share/gitea-mirror"}
mkdir -p "$DATA_DIR"
cd $out/lib/gitea-mirror
exec ${pkgs.bun}/bin/bun scripts/manage-db.ts "$@"
EOF
chmod +x $out/bin/gitea-mirror-db
'';
meta = with pkgs.lib; {
description = "Self-hosted GitHub to Gitea mirroring service";
homepage = "https://github.com/RayLabsHQ/gitea-mirror";
license = licenses.mit;
maintainers = [ ];
platforms = platforms.linux ++ platforms.darwin;
};
};
in
{
packages = {
default = gitea-mirror;
gitea-mirror = gitea-mirror;
};
# Development shell
devShells.default = pkgs.mkShell {
buildInputs = with pkgs; [
bun
sqlite
openssl
];
shellHook = ''
echo "🚀 Gitea Mirror development environment"
echo ""
echo "Quick start:"
echo " bun install # Install dependencies"
echo " bun run dev # Start development server"
echo " bun run build # Build for production"
echo ""
echo "Database:"
echo " bun run manage-db init # Initialize database"
echo " bun run db:studio # Open Drizzle Studio"
'';
};
# NixOS module
nixosModules.default = { config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.gitea-mirror;
in {
options.services.gitea-mirror = {
enable = mkEnableOption "Gitea Mirror service";
package = mkOption {
type = types.package;
default = self.packages.${system}.default;
description = "The Gitea Mirror package to use";
};
dataDir = mkOption {
type = types.path;
default = "/var/lib/gitea-mirror";
description = "Directory to store data and database";
};
user = mkOption {
type = types.str;
default = "gitea-mirror";
description = "User account under which Gitea Mirror runs";
};
group = mkOption {
type = types.str;
default = "gitea-mirror";
description = "Group under which Gitea Mirror runs";
};
host = mkOption {
type = types.str;
default = "0.0.0.0";
description = "Host to bind to";
};
port = mkOption {
type = types.port;
default = 4321;
description = "Port to listen on";
};
betterAuthUrl = mkOption {
type = types.str;
default = "http://localhost:4321";
description = "Better Auth URL (external URL of the service)";
};
betterAuthTrustedOrigins = mkOption {
type = types.str;
default = "http://localhost:4321";
description = "Comma-separated list of trusted origins for Better Auth";
};
mirrorIssueConcurrency = mkOption {
type = types.int;
default = 3;
description = "Number of concurrent issue mirror operations (set to 1 for perfect ordering)";
};
mirrorPullRequestConcurrency = mkOption {
type = types.int;
default = 5;
description = "Number of concurrent PR mirror operations (set to 1 for perfect ordering)";
};
environmentFile = mkOption {
type = types.nullOr types.path;
default = null;
description = ''
Path to file containing environment variables.
Only needed if you want to set BETTER_AUTH_SECRET or ENCRYPTION_SECRET manually.
Otherwise, secrets will be auto-generated and stored in the data directory.
Example:
BETTER_AUTH_SECRET=your-32-character-secret-here
ENCRYPTION_SECRET=your-encryption-secret-here
'';
};
openFirewall = mkOption {
type = types.bool;
default = false;
description = "Open the firewall for the specified port";
};
};
config = mkIf cfg.enable {
users.users.${cfg.user} = {
isSystemUser = true;
group = cfg.group;
home = cfg.dataDir;
createHome = true;
};
users.groups.${cfg.group} = {};
systemd.services.gitea-mirror = {
description = "Gitea Mirror - GitHub to Gitea mirroring service";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
environment = {
DATA_DIR = cfg.dataDir;
DATABASE_URL = "file:${cfg.dataDir}/gitea-mirror.db";
HOST = cfg.host;
PORT = toString cfg.port;
NODE_ENV = "production";
BETTER_AUTH_URL = cfg.betterAuthUrl;
BETTER_AUTH_TRUSTED_ORIGINS = cfg.betterAuthTrustedOrigins;
PUBLIC_BETTER_AUTH_URL = cfg.betterAuthUrl;
MIRROR_ISSUE_CONCURRENCY = toString cfg.mirrorIssueConcurrency;
MIRROR_PULL_REQUEST_CONCURRENCY = toString cfg.mirrorPullRequestConcurrency;
};
serviceConfig = {
Type = "simple";
User = cfg.user;
Group = cfg.group;
ExecStart = "${cfg.package}/bin/gitea-mirror";
Restart = "always";
RestartSec = "10s";
# Security hardening
NoNewPrivileges = true;
PrivateTmp = true;
ProtectSystem = "strict";
ProtectHome = true;
ReadWritePaths = [ cfg.dataDir ];
# Load environment file if specified (optional)
EnvironmentFile = mkIf (cfg.environmentFile != null) cfg.environmentFile;
# Graceful shutdown
TimeoutStopSec = "30s";
KillMode = "mixed";
KillSignal = "SIGTERM";
};
};
# Health check timer (optional monitoring)
systemd.timers.gitea-mirror-healthcheck = mkIf cfg.enable {
description = "Gitea Mirror health check timer";
wantedBy = [ "timers.target" ];
timerConfig = {
OnBootSec = "5min";
OnUnitActiveSec = "5min";
};
};
systemd.services.gitea-mirror-healthcheck = mkIf cfg.enable {
description = "Gitea Mirror health check";
after = [ "gitea-mirror.service" ];
serviceConfig = {
Type = "oneshot";
ExecStart = "${pkgs.curl}/bin/curl -f http://${cfg.host}:${toString cfg.port}/api/health || true";
User = "nobody";
};
};
networking.firewall = mkIf cfg.openFirewall {
allowedTCPPorts = [ cfg.port ];
};
};
};
}
) // {
# Overlay for adding to nixpkgs
overlays.default = final: prev: {
gitea-mirror = self.packages.${final.system}.default;
};
};
}
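Note: a minimal sketch of how a NixOS host might consume the module defined above. The flake URL and host name are assumptions (not part of this changeset); the option names match the module options declared in this flake.nix.

# Hypothetical consumer flake.nix fragment — illustration only, not included in this diff.
{
  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
  inputs.gitea-mirror.url = "github:RayLabsHQ/gitea-mirror"; # assumed flake URL

  outputs = { nixpkgs, gitea-mirror, ... }: {
    nixosConfigurations.myhost = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        gitea-mirror.nixosModules.default
        {
          services.gitea-mirror = {
            enable = true;
            port = 4321;
            betterAuthUrl = "https://mirror.example.com";
            betterAuthTrustedOrigins = "https://mirror.example.com";
            openFirewall = true;
          };
        }
      ];
    };
  };
}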

View File

@@ -1,7 +1,7 @@
{
"name": "gitea-mirror",
"type": "module",
"version": "3.8.7",
"version": "3.8.11",
"engines": {
"bun": ">=1.2.9"
},
@@ -75,7 +75,7 @@
"astro": "^5.14.8",
"bcryptjs": "^3.0.2",
"buffer": "^6.0.3",
"better-auth": "1.4.0-beta.12",
"better-auth": "1.4.0-beta.13",
"canvas-confetti": "^1.9.3",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",

View File

@@ -114,10 +114,10 @@ EOF
echo "======================================"
echo "1. Access Authentik at http://localhost:9000"
echo "2. Login with akadmin / admin-password"
echo "3. Create OAuth2 Provider for Gitea Mirror:"
echo "3. Create an Authentik OIDC Provider for Gitea Mirror:"
echo " - Name: gitea-mirror"
echo " - Redirect URIs:"
echo " http://localhost:4321/api/auth/callback/sso-provider"
echo " - Redirect URI:"
echo " http://localhost:4321/api/auth/sso/callback/authentik"
echo " - Scopes: openid, profile, email"
echo ""
echo "4. Create Application:"
@@ -131,10 +131,14 @@ EOF
echo "6. Configure SSO in Gitea Mirror:"
echo " - Go to Settings → Authentication & SSO"
echo " - Add provider with:"
echo " - Provider ID: authentik"
echo " - Issuer URL: http://localhost:9000/application/o/gitea-mirror/"
echo " - Click Discover to pull Authentik endpoints"
echo " - Client ID: (from Authentik provider)"
echo " - Client Secret: (from Authentik provider)"
echo ""
echo "If you previously registered this provider on a version earlier than v3.8.10, delete it and re-add it after upgrading to avoid missing endpoint data."
echo ""
;;
stop)

View File

@@ -306,7 +306,7 @@ export function Dashboard() {
title="Repositories"
value={repoCount}
icon={<GitFork className="h-4 w-4" />}
description="Total in mirror queue"
description="Total imported repositories"
/>
<StatusCard
title="Mirrored"

View File

@@ -1,5 +1,5 @@
import * as React from "react";
import { useState } from "react";
import { useEffect, useState } from "react";
import { Button } from "@/components/ui/button";
import {
Dialog,
@@ -20,9 +20,11 @@ interface AddOrganizationDialogProps {
onAddOrganization: ({
org,
role,
force,
}: {
org: string;
role: MembershipRole;
force?: boolean;
}) => Promise<void>;
}
@@ -36,6 +38,14 @@ export default function AddOrganizationDialog({
const [isLoading, setIsLoading] = useState<boolean>(false);
const [error, setError] = useState<string>("");
useEffect(() => {
if (!isDialogOpen) {
setError("");
setOrg("");
setRole("member");
}
}, [isDialogOpen]);
const handleSubmit = async (e: React.FormEvent) => {
e.preventDefault();
@@ -54,7 +64,7 @@ export default function AddOrganizationDialog({
setRole("member");
setIsDialogOpen(false);
} catch (err: any) {
setError(err?.message || "Failed to add repository.");
setError(err?.message || "Failed to add organization.");
} finally {
setIsLoading(false);
}
@@ -139,7 +149,7 @@ export default function AddOrganizationDialog({
{isLoading ? (
<LoaderCircle className="h-4 w-4 animate-spin" />
) : (
"Add Repository"
"Add Organization"
)}
</Button>
</div>

View File

@@ -1,6 +1,6 @@
import { useCallback, useEffect, useState } from "react";
import { Button } from "@/components/ui/button";
import { Search, RefreshCw, FlipHorizontal, Filter } from "lucide-react";
import { Search, RefreshCw, FlipHorizontal, Filter, LoaderCircle, Trash2 } from "lucide-react";
import type { MirrorJob, Organization } from "@/lib/db/schema";
import { OrganizationList } from "./OrganizationsList";
import AddOrganizationDialog from "./AddOrganizationDialog";
@@ -37,6 +37,14 @@ import {
DrawerTitle,
DrawerTrigger,
} from "@/components/ui/drawer";
import {
Dialog,
DialogContent,
DialogDescription,
DialogFooter,
DialogHeader,
DialogTitle,
} from "@/components/ui/dialog";
export function Organization() {
const [organizations, setOrganizations] = useState<Organization[]>([]);
@@ -52,6 +60,15 @@ export function Organization() {
status: "",
});
  const [loadingOrgIds, setLoadingOrgIds] = useState<Set<string>>(new Set()); // Tracks org IDs with API actions in flight
const [duplicateOrgCandidate, setDuplicateOrgCandidate] = useState<{
org: string;
role: MembershipRole;
} | null>(null);
const [isDuplicateOrgDialogOpen, setIsDuplicateOrgDialogOpen] = useState(false);
const [isProcessingDuplicateOrg, setIsProcessingDuplicateOrg] = useState(false);
const [orgToDelete, setOrgToDelete] = useState<Organization | null>(null);
const [isDeleteOrgDialogOpen, setIsDeleteOrgDialogOpen] = useState(false);
const [isDeletingOrg, setIsDeletingOrg] = useState(false);
// Create a stable callback using useCallback
const handleNewMessage = useCallback((data: MirrorJob) => {
@@ -256,19 +273,45 @@ export function Organization() {
const handleAddOrganization = async ({
org,
role,
force = false,
}: {
org: string;
role: MembershipRole;
force?: boolean;
}) => {
try {
if (!user || !user.id) {
return;
}
const trimmedOrg = org.trim();
const normalizedOrg = trimmedOrg.toLowerCase();
if (!trimmedOrg) {
toast.error("Please enter a valid organization name.");
throw new Error("Invalid organization name");
}
if (!force) {
const alreadyExists = organizations.some(
(existing) => existing.name?.trim().toLowerCase() === normalizedOrg
);
if (alreadyExists) {
toast.warning("Organization already exists.");
setDuplicateOrgCandidate({ org: trimmedOrg, role });
setIsDuplicateOrgDialogOpen(true);
throw new Error("Organization already exists");
}
}
try {
setIsLoading(true);
const reqPayload: AddOrganizationApiRequest = {
userId: user.id,
org,
org: trimmedOrg,
role,
force,
};
const response = await apiRequest<AddOrganizationApiResponse>(
@@ -280,25 +323,100 @@ export function Organization() {
);
if (response.success) {
toast.success(`Organization added successfully`);
setOrganizations((prev) => [...prev, response.organization]);
const message = force
? "Organization already exists; using existing entry."
: "Organization added successfully";
toast.success(message);
await fetchOrganizations();
await fetchOrganizations(false);
setFilter((prev) => ({
...prev,
searchTerm: org,
searchTerm: trimmedOrg,
}));
if (force) {
setIsDuplicateOrgDialogOpen(false);
setDuplicateOrgCandidate(null);
}
} else {
showErrorToast(response.error || "Error adding organization", toast);
}
} catch (error) {
showErrorToast(error, toast);
throw error;
} finally {
setIsLoading(false);
}
};
const handleConfirmDuplicateOrganization = async () => {
if (!duplicateOrgCandidate) {
return;
}
setIsProcessingDuplicateOrg(true);
try {
await handleAddOrganization({
org: duplicateOrgCandidate.org,
role: duplicateOrgCandidate.role,
force: true,
});
setIsDialogOpen(false);
setDuplicateOrgCandidate(null);
setIsDuplicateOrgDialogOpen(false);
} catch (error) {
// Error already surfaced via toast
} finally {
setIsProcessingDuplicateOrg(false);
}
};
const handleCancelDuplicateOrganization = () => {
setIsDuplicateOrgDialogOpen(false);
setDuplicateOrgCandidate(null);
};
const handleRequestDeleteOrganization = (orgId: string) => {
const org = organizations.find((item) => item.id === orgId);
if (!org) {
toast.error("Organization not found");
return;
}
setOrgToDelete(org);
setIsDeleteOrgDialogOpen(true);
};
const handleDeleteOrganization = async () => {
if (!user || !user.id || !orgToDelete) {
return;
}
setIsDeletingOrg(true);
try {
const response = await apiRequest<{ success: boolean; error?: string }>(
`/organizations/${orgToDelete.id}`,
{
method: "DELETE",
}
);
if (response.success) {
toast.success(`Removed ${orgToDelete.name} from Gitea Mirror.`);
await fetchOrganizations(false);
} else {
showErrorToast(response.error || "Failed to delete organization", toast);
}
} catch (error) {
showErrorToast(error, toast);
} finally {
setIsDeletingOrg(false);
setIsDeleteOrgDialogOpen(false);
setOrgToDelete(null);
}
};
const handleMirrorAllOrgs = async () => {
try {
if (!user || !user.id || organizations.length === 0) {
@@ -711,6 +829,7 @@ export function Organization() {
onMirror={handleMirrorOrg}
onIgnore={handleIgnoreOrg}
onAddOrganization={() => setIsDialogOpen(true)}
onDelete={handleRequestDeleteOrganization}
onRefresh={async () => {
await fetchOrganizations(false);
}}
@@ -721,6 +840,68 @@ export function Organization() {
isDialogOpen={isDialogOpen}
setIsDialogOpen={setIsDialogOpen}
/>
<Dialog open={isDuplicateOrgDialogOpen} onOpenChange={(open) => {
if (!open) {
handleCancelDuplicateOrganization();
}
}}>
<DialogContent>
<DialogHeader>
<DialogTitle>Organization already exists</DialogTitle>
<DialogDescription>
{duplicateOrgCandidate?.org ?? "This organization"} is already synced in Gitea Mirror.
Continuing will reuse the existing entry without creating a duplicate. You can remove it later if needed.
</DialogDescription>
</DialogHeader>
<DialogFooter>
<Button variant="outline" onClick={handleCancelDuplicateOrganization} disabled={isProcessingDuplicateOrg}>
Cancel
</Button>
<Button onClick={handleConfirmDuplicateOrganization} disabled={isProcessingDuplicateOrg}>
{isProcessingDuplicateOrg ? (
<LoaderCircle className="h-4 w-4 animate-spin" />
) : (
"Continue"
)}
</Button>
</DialogFooter>
</DialogContent>
</Dialog>
<Dialog open={isDeleteOrgDialogOpen} onOpenChange={(open) => {
if (!open) {
setIsDeleteOrgDialogOpen(false);
setOrgToDelete(null);
}
}}>
<DialogContent>
<DialogHeader>
<DialogTitle>Remove organization from Gitea Mirror?</DialogTitle>
<DialogDescription>
{orgToDelete?.name ?? "This organization"} will be deleted from Gitea Mirror only. Nothing will be removed from Gitea; you will need to clean it up manually in Gitea if desired.
</DialogDescription>
</DialogHeader>
<DialogFooter>
<Button variant="outline" onClick={() => {
setIsDeleteOrgDialogOpen(false);
setOrgToDelete(null);
}} disabled={isDeletingOrg}>
Cancel
</Button>
<Button variant="destructive" onClick={handleDeleteOrganization} disabled={isDeletingOrg}>
{isDeletingOrg ? (
<LoaderCircle className="h-4 w-4 animate-spin" />
) : (
<span className="flex items-center gap-2">
<Trash2 className="h-4 w-4" />
Delete
</span>
)}
</Button>
</DialogFooter>
</DialogContent>
</Dialog>
</div>
);
}

View File

@@ -2,7 +2,7 @@ import { useMemo } from "react";
import { Card } from "@/components/ui/card";
import { Button } from "@/components/ui/button";
import { Badge } from "@/components/ui/badge";
import { Plus, RefreshCw, Building2, Check, AlertCircle, Clock, MoreVertical, Ban } from "lucide-react";
import { Plus, RefreshCw, Building2, Check, AlertCircle, Clock, MoreVertical, Ban, Trash2 } from "lucide-react";
import { SiGithub, SiGitea } from "react-icons/si";
import type { Organization } from "@/lib/db/schema";
import type { FilterParams } from "@/types/filter";
@@ -30,6 +30,7 @@ interface OrganizationListProps {
loadingOrgIds: Set<string>;
onAddOrganization?: () => void;
onRefresh?: () => Promise<void>;
onDelete?: (orgId: string) => void;
}
// Helper function to get status badge variant and icon
@@ -60,6 +61,7 @@ export function OrganizationList({
loadingOrgIds,
onAddOrganization,
onRefresh,
onDelete,
}: OrganizationListProps) {
const { giteaConfig } = useGiteaConfig();
@@ -414,7 +416,7 @@ export function OrganizationList({
)}
{/* Dropdown menu for additional actions */}
{org.status !== "ignored" && org.status !== "mirroring" && (
{org.status !== "mirroring" && (
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button variant="ghost" size="icon" disabled={isLoading} className="h-10 w-10">
@@ -422,12 +424,26 @@ export function OrganizationList({
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align="end">
{org.status !== "ignored" && (
<DropdownMenuItem
onClick={() => org.id && onIgnore && onIgnore({ orgId: org.id, ignore: true })}
>
<Ban className="h-4 w-4 mr-2" />
Ignore Organization
</DropdownMenuItem>
)}
{onDelete && (
<>
{org.status !== "ignored" && <DropdownMenuSeparator />}
<DropdownMenuItem
className="text-destructive focus:text-destructive"
onClick={() => org.id && onDelete(org.id)}
>
<Trash2 className="h-4 w-4 mr-2" />
Delete from Mirror
</DropdownMenuItem>
</>
)}
</DropdownMenuContent>
</DropdownMenu>
)}
@@ -561,7 +577,7 @@ export function OrganizationList({
)}
{/* Dropdown menu for additional actions */}
{org.status !== "ignored" && org.status !== "mirroring" && (
{org.status !== "mirroring" && (
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button variant="ghost" size="icon" disabled={isLoading}>
@@ -569,12 +585,26 @@ export function OrganizationList({
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align="end">
{org.status !== "ignored" && (
<DropdownMenuItem
onClick={() => org.id && onIgnore && onIgnore({ orgId: org.id, ignore: true })}
>
<Ban className="h-4 w-4 mr-2" />
Ignore Organization
</DropdownMenuItem>
)}
{onDelete && (
<>
{org.status !== "ignored" && <DropdownMenuSeparator />}
<DropdownMenuItem
className="text-destructive focus:text-destructive"
onClick={() => org.id && onDelete(org.id)}
>
<Trash2 className="h-4 w-4 mr-2" />
Delete from Mirror
</DropdownMenuItem>
</>
)}
</DropdownMenuContent>
</DropdownMenu>
)}

View File

@@ -1,5 +1,5 @@
import * as React from "react";
import { useState } from "react";
import { useEffect, useState } from "react";
import { Button } from "@/components/ui/button";
import {
Dialog,
@@ -17,9 +17,11 @@ interface AddRepositoryDialogProps {
onAddRepository: ({
repo,
owner,
force,
}: {
repo: string;
owner: string;
force?: boolean;
}) => Promise<void>;
}
@@ -33,6 +35,14 @@ export default function AddRepositoryDialog({
const [isLoading, setIsLoading] = useState<boolean>(false);
const [error, setError] = useState<string>("");
useEffect(() => {
if (!isDialogOpen) {
setError("");
setRepo("");
setOwner("");
}
}, [isDialogOpen]);
const handleSubmit = async (e: React.FormEvent) => {
e.preventDefault();

View File

@@ -18,7 +18,7 @@ import {
SelectValue,
} from "../ui/select";
import { Button } from "@/components/ui/button";
import { Search, RefreshCw, FlipHorizontal, RotateCcw, X, Filter, Ban, Check } from "lucide-react";
import { Search, RefreshCw, FlipHorizontal, RotateCcw, X, Filter, Ban, Check, LoaderCircle, Trash2 } from "lucide-react";
import type { MirrorRepoRequest, MirrorRepoResponse } from "@/types/mirror";
import {
Drawer,
@@ -30,6 +30,14 @@ import {
DrawerTitle,
DrawerTrigger,
} from "@/components/ui/drawer";
import {
Dialog,
DialogContent,
DialogDescription,
DialogFooter,
DialogHeader,
DialogTitle,
} from "@/components/ui/dialog";
import { useSSE } from "@/hooks/useSEE";
import { useFilterParams } from "@/hooks/useFilterParams";
import { toast } from "sonner";
@@ -69,6 +77,15 @@ export default function Repository() {
}, [setFilter]);
  const [loadingRepoIds, setLoadingRepoIds] = useState<Set<string>>(new Set()); // Tracks repo IDs with API actions in flight
const [duplicateRepoCandidate, setDuplicateRepoCandidate] = useState<{
owner: string;
repo: string;
} | null>(null);
const [isDuplicateRepoDialogOpen, setIsDuplicateRepoDialogOpen] = useState(false);
const [isProcessingDuplicateRepo, setIsProcessingDuplicateRepo] = useState(false);
const [repoToDelete, setRepoToDelete] = useState<Repository | null>(null);
const [isDeleteRepoDialogOpen, setIsDeleteRepoDialogOpen] = useState(false);
const [isDeletingRepo, setIsDeletingRepo] = useState(false);
// Create a stable callback using useCallback
const handleNewMessage = useCallback((data: MirrorJob) => {
@@ -618,19 +635,45 @@ export default function Repository() {
const handleAddRepository = async ({
repo,
owner,
force = false,
}: {
repo: string;
owner: string;
force?: boolean;
}) => {
try {
if (!user || !user.id) {
return;
}
const trimmedRepo = repo.trim();
const trimmedOwner = owner.trim();
if (!trimmedRepo || !trimmedOwner) {
toast.error("Please provide both owner and repository name.");
throw new Error("Invalid repository details");
}
const normalizedFullName = `${trimmedOwner}/${trimmedRepo}`.toLowerCase();
if (!force) {
const duplicateRepo = repositories.find(
(existing) => existing.normalizedFullName?.toLowerCase() === normalizedFullName
);
if (duplicateRepo) {
toast.warning("Repository already exists.");
setDuplicateRepoCandidate({ repo: trimmedRepo, owner: trimmedOwner });
setIsDuplicateRepoDialogOpen(true);
throw new Error("Repository already exists");
}
}
try {
const reqPayload: AddRepositoriesApiRequest = {
userId: user.id,
repo,
owner,
repo: trimmedRepo,
owner: trimmedOwner,
force,
};
const response = await apiRequest<AddRepositoriesApiResponse>(
@@ -642,20 +685,28 @@ export default function Repository() {
);
if (response.success) {
toast.success(`Repository added successfully`);
setRepositories((prevRepos) => [...prevRepos, response.repository]);
const message = force
? "Repository already exists; metadata refreshed."
: "Repository added successfully";
toast.success(message);
await fetchRepositories(false); // Manual refresh after adding repository
await fetchRepositories(false);
setFilter((prev) => ({
...prev,
searchTerm: repo,
searchTerm: trimmedRepo,
}));
if (force) {
setDuplicateRepoCandidate(null);
setIsDuplicateRepoDialogOpen(false);
}
} else {
showErrorToast(response.error || "Error adding repository", toast);
}
} catch (error) {
showErrorToast(error, toast);
throw error;
}
};
@@ -673,6 +724,71 @@ export default function Repository() {
)
).sort();
const handleConfirmDuplicateRepository = async () => {
if (!duplicateRepoCandidate) {
return;
}
setIsProcessingDuplicateRepo(true);
try {
await handleAddRepository({
repo: duplicateRepoCandidate.repo,
owner: duplicateRepoCandidate.owner,
force: true,
});
setIsDialogOpen(false);
} catch (error) {
// Error already shown
} finally {
setIsProcessingDuplicateRepo(false);
}
};
const handleCancelDuplicateRepository = () => {
setDuplicateRepoCandidate(null);
setIsDuplicateRepoDialogOpen(false);
};
const handleRequestDeleteRepository = (repoId: string) => {
const repo = repositories.find((item) => item.id === repoId);
if (!repo) {
toast.error("Repository not found");
return;
}
setRepoToDelete(repo);
setIsDeleteRepoDialogOpen(true);
};
const handleDeleteRepository = async () => {
if (!user || !user.id || !repoToDelete) {
return;
}
setIsDeletingRepo(true);
try {
const response = await apiRequest<{ success: boolean; error?: string }>(
`/repositories/${repoToDelete.id}`,
{
method: "DELETE",
}
);
if (response.success) {
toast.success(`Removed ${repoToDelete.fullName} from Gitea Mirror.`);
await fetchRepositories(false);
} else {
showErrorToast(response.error || "Failed to delete repository", toast);
}
} catch (error) {
showErrorToast(error, toast);
} finally {
setIsDeletingRepo(false);
setIsDeleteRepoDialogOpen(false);
setRepoToDelete(null);
}
};
// Determine what actions are available for selected repositories
const getAvailableActions = () => {
if (selectedRepoIds.size === 0) return [];
@@ -1198,6 +1314,7 @@ export default function Repository() {
onRefresh={async () => {
await fetchRepositories(false);
}}
onDelete={handleRequestDeleteRepository}
/>
)}
@@ -1206,6 +1323,77 @@ export default function Repository() {
isDialogOpen={isDialogOpen}
setIsDialogOpen={setIsDialogOpen}
/>
<Dialog
open={isDuplicateRepoDialogOpen}
onOpenChange={(open) => {
if (!open) {
handleCancelDuplicateRepository();
}
}}
>
<DialogContent>
<DialogHeader>
<DialogTitle>Repository already exists</DialogTitle>
<DialogDescription>
{duplicateRepoCandidate ? `${duplicateRepoCandidate.owner}/${duplicateRepoCandidate.repo}` : "This repository"} is already tracked in Gitea Mirror. Continuing will refresh the existing entry without creating a duplicate.
</DialogDescription>
</DialogHeader>
<DialogFooter>
<Button variant="outline" onClick={handleCancelDuplicateRepository} disabled={isProcessingDuplicateRepo}>
Cancel
</Button>
<Button onClick={handleConfirmDuplicateRepository} disabled={isProcessingDuplicateRepo}>
{isProcessingDuplicateRepo ? (
<LoaderCircle className="h-4 w-4 animate-spin" />
) : (
"Continue"
)}
</Button>
</DialogFooter>
</DialogContent>
</Dialog>
<Dialog
open={isDeleteRepoDialogOpen}
onOpenChange={(open) => {
if (!open) {
setIsDeleteRepoDialogOpen(false);
setRepoToDelete(null);
}
}}
>
<DialogContent>
<DialogHeader>
<DialogTitle>Remove repository from Gitea Mirror?</DialogTitle>
<DialogDescription>
{repoToDelete?.fullName ?? "This repository"} will be deleted from Gitea Mirror only. The mirror on Gitea will remain untouched; remove it manually in Gitea if needed.
</DialogDescription>
</DialogHeader>
<DialogFooter>
<Button
variant="outline"
onClick={() => {
setIsDeleteRepoDialogOpen(false);
setRepoToDelete(null);
}}
disabled={isDeletingRepo}
>
Cancel
</Button>
<Button variant="destructive" onClick={handleDeleteRepository} disabled={isDeletingRepo}>
{isDeletingRepo ? (
<LoaderCircle className="h-4 w-4 animate-spin" />
) : (
<span className="flex items-center gap-2">
<Trash2 className="h-4 w-4" />
Delete
</span>
)}
</Button>
</DialogFooter>
</DialogContent>
</Dialog>
</div>
);
}

View File

@@ -1,7 +1,7 @@
import { useMemo, useRef } from "react";
import Fuse from "fuse.js";
import { useVirtualizer } from "@tanstack/react-virtual";
import { FlipHorizontal, GitFork, RefreshCw, RotateCcw, Star, Lock, Ban, Check, ChevronDown } from "lucide-react";
import { FlipHorizontal, GitFork, RefreshCw, RotateCcw, Star, Lock, Ban, Check, ChevronDown, Trash2 } from "lucide-react";
import { SiGithub, SiGitea } from "react-icons/si";
import type { Repository } from "@/lib/db/schema";
import { Button } from "@/components/ui/button";
@@ -23,6 +23,7 @@ import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuSeparator,
DropdownMenuTrigger,
} from "@/components/ui/dropdown-menu";
@@ -40,6 +41,7 @@ interface RepositoryTableProps {
selectedRepoIds: Set<string>;
onSelectionChange: (selectedIds: Set<string>) => void;
onRefresh?: () => Promise<void>;
onDelete?: (repoId: string) => void;
}
export default function RepositoryTable({
@@ -56,6 +58,7 @@ export default function RepositoryTable({
selectedRepoIds,
onSelectionChange,
onRefresh,
onDelete,
}: RepositoryTableProps) {
const tableParentRef = useRef<HTMLDivElement>(null);
const { giteaConfig } = useGiteaConfig();
@@ -676,6 +679,7 @@ export default function RepositoryTable({
onSync={() => onSync({ repoId: repo.id ?? "" })}
onRetry={() => onRetry({ repoId: repo.id ?? "" })}
onSkip={(skip) => onSkip({ repoId: repo.id ?? "", skip })}
onDelete={onDelete && repo.id ? () => onDelete(repo.id as string) : undefined}
/>
</div>
{/* Links */}
@@ -786,6 +790,7 @@ function RepoActionButton({
onSync,
onRetry,
onSkip,
onDelete,
}: {
repo: { id: string; status: string };
isLoading: boolean;
@@ -793,6 +798,7 @@ function RepoActionButton({
onSync: () => void;
onRetry: () => void;
onSkip: (skip: boolean) => void;
onDelete?: () => void;
}) {
// For ignored repos, show an "Include" action
if (repo.status === "ignored") {
@@ -849,7 +855,7 @@ function RepoActionButton({
);
}
// Show primary action with dropdown for skip option
// Show primary action with dropdown for additional actions
return (
<DropdownMenu>
<div className="flex">
@@ -886,6 +892,18 @@ function RepoActionButton({
<Ban className="h-4 w-4 mr-2" />
Ignore Repository
</DropdownMenuItem>
{onDelete && (
<>
<DropdownMenuSeparator />
<DropdownMenuItem
className="text-destructive focus:text-destructive"
onClick={onDelete}
>
<Trash2 className="h-4 w-4 mr-2" />
Delete from Mirror
</DropdownMenuItem>
</>
)}
</DropdownMenuContent>
</DropdownMenu>
);

View File

@@ -166,6 +166,8 @@ export const auth = betterAuth({
defaultOverrideUserInfo: true,
// Allow implicit sign up for new users
disableImplicitSignUp: false,
// Trust email_verified claims from the upstream provider so we can link by matching email
trustEmailVerified: true,
}),
],
});

View File

@@ -127,6 +127,7 @@ export const repositorySchema = z.object({
configId: z.string(),
name: z.string(),
fullName: z.string(),
normalizedFullName: z.string(),
url: z.url(),
cloneUrl: z.url(),
owner: z.string(),
@@ -163,6 +164,7 @@ export const repositorySchema = z.object({
lastMirrored: z.coerce.date().optional().nullable(),
errorMessage: z.string().optional().nullable(),
destinationOrg: z.string().optional().nullable(),
metadata: z.string().optional().nullable(), // JSON string for metadata sync state
createdAt: z.coerce.date(),
updatedAt: z.coerce.date(),
});
@@ -209,6 +211,7 @@ export const organizationSchema = z.object({
userId: z.string(),
configId: z.string(),
name: z.string(),
normalizedName: z.string(),
avatarUrl: z.string(),
membershipRole: z.enum(["member", "admin", "owner", "billing_manager"]).default("member"),
isIncluded: z.boolean().default(true),
@@ -334,6 +337,7 @@ export const repositories = sqliteTable("repositories", {
.references(() => configs.id),
name: text("name").notNull(),
fullName: text("full_name").notNull(),
normalizedFullName: text("normalized_full_name").notNull(),
url: text("url").notNull(),
cloneUrl: text("clone_url").notNull(),
owner: text("owner").notNull(),
@@ -373,6 +377,8 @@ export const repositories = sqliteTable("repositories", {
destinationOrg: text("destination_org"),
metadata: text("metadata"), // JSON string storing metadata sync state (issues, PRs, releases, etc.)
createdAt: integer("created_at", { mode: "timestamp" })
.notNull()
.default(sql`(unixepoch())`),
@@ -388,6 +394,7 @@ export const repositories = sqliteTable("repositories", {
index("idx_repositories_is_fork").on(table.isForked),
index("idx_repositories_is_starred").on(table.isStarred),
uniqueIndex("uniq_repositories_user_full_name").on(table.userId, table.fullName),
uniqueIndex("uniq_repositories_user_normalized_full_name").on(table.userId, table.normalizedFullName),
]);
export const mirrorJobs = sqliteTable("mirror_jobs", {
@@ -438,6 +445,7 @@ export const organizations = sqliteTable("organizations", {
.notNull()
.references(() => configs.id),
name: text("name").notNull(),
normalizedName: text("normalized_name").notNull(),
avatarUrl: text("avatar_url").notNull(),
@@ -469,6 +477,7 @@ export const organizations = sqliteTable("organizations", {
index("idx_organizations_config_id").on(table.configId),
index("idx_organizations_status").on(table.status),
index("idx_organizations_is_included").on(table.isIncluded),
uniqueIndex("uniq_organizations_user_normalized_name").on(table.userId, table.normalizedName),
]);
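Note: the new normalized_name / normalized_full_name columns back unique indexes that block case-variant duplicates. A minimal sketch of the normalization they imply, mirroring the .toLowerCase() comparisons in the Organization and Repository components earlier in this changeset; the actual helper used by the codebase is not shown in this diff and may differ.

// Hypothetical helpers — grounded only in the lowercase checks shown above.
export function normalizeOrgName(name: string): string {
  return name.trim().toLowerCase();
}

export function normalizeRepoFullName(owner: string, repo: string): string {
  return `${owner.trim()}/${repo.trim()}`.toLowerCase();
}

// Both spellings collapse to the same key, so
// uniqueIndex("uniq_repositories_user_normalized_full_name") rejects the second insert.
normalizeRepoFullName("RayLabsHQ", "Gitea-Mirror") === normalizeRepoFullName("raylabshq", "gitea-mirror"); // true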
// ===== Better Auth Tables =====
@@ -502,6 +511,10 @@ export const accounts = sqliteTable("accounts", {
providerUserId: text("provider_user_id"), // Make nullable for email/password auth
accessToken: text("access_token"),
refreshToken: text("refresh_token"),
idToken: text("id_token"),
accessTokenExpiresAt: integer("access_token_expires_at", { mode: "timestamp" }),
refreshTokenExpiresAt: integer("refresh_token_expires_at", { mode: "timestamp" }),
scope: text("scope"),
expiresAt: integer("expires_at", { mode: "timestamp" }),
password: text("password"), // For credential provider
createdAt: integer("created_at", { mode: "timestamp" })

View File

@@ -8,6 +8,10 @@ mock.module("@/lib/helpers", () => ({
}));
const mockMirrorGitHubReleasesToGitea = mock(() => Promise.resolve());
const mockMirrorGitRepoIssuesToGitea = mock(() => Promise.resolve());
const mockMirrorGitRepoPullRequestsToGitea = mock(() => Promise.resolve());
const mockMirrorGitRepoLabelsToGitea = mock(() => Promise.resolve());
const mockMirrorGitRepoMilestonesToGitea = mock(() => Promise.resolve());
const mockGetGiteaRepoOwnerAsync = mock(() => Promise.resolve("starred"));
// Mock the database module
@@ -128,6 +132,36 @@ const mockHttpGet = mock(async (url: string, headers?: any) => {
headers: new Headers()
};
}
if (url.includes("/api/v1/repos/starred/metadata-repo")) {
return {
data: {
id: 790,
name: "metadata-repo",
mirror: true,
owner: { login: "starred" },
mirror_interval: "8h",
private: false,
},
status: 200,
statusText: "OK",
headers: new Headers(),
};
}
if (url.includes("/api/v1/repos/starred/already-synced-repo")) {
return {
data: {
id: 791,
name: "already-synced-repo",
mirror: true,
owner: { login: "starred" },
mirror_interval: "8h",
private: false,
},
status: 200,
statusText: "OK",
headers: new Headers(),
};
}
if (url.includes("/api/v1/repos/")) {
throw new MockHttpError("Not Found", 404, "Not Found");
}
@@ -224,6 +258,10 @@ describe("Enhanced Gitea Operations", () => {
mockDb.insert.mockClear();
mockDb.update.mockClear();
mockMirrorGitHubReleasesToGitea.mockClear();
mockMirrorGitRepoIssuesToGitea.mockClear();
mockMirrorGitRepoPullRequestsToGitea.mockClear();
mockMirrorGitRepoLabelsToGitea.mockClear();
mockMirrorGitRepoMilestonesToGitea.mockClear();
mockGetGiteaRepoOwnerAsync.mockClear();
mockGetGiteaRepoOwnerAsync.mockImplementation(() => Promise.resolve("starred"));
// Reset tracking variables
@@ -426,6 +464,10 @@ describe("Enhanced Gitea Operations", () => {
{
getGiteaRepoOwnerAsync: mockGetGiteaRepoOwnerAsync,
mirrorGitHubReleasesToGitea: mockMirrorGitHubReleasesToGitea,
mirrorGitRepoIssuesToGitea: mockMirrorGitRepoIssuesToGitea,
mirrorGitRepoPullRequestsToGitea: mockMirrorGitRepoPullRequestsToGitea,
mirrorGitRepoLabelsToGitea: mockMirrorGitRepoLabelsToGitea,
mirrorGitRepoMilestonesToGitea: mockMirrorGitRepoMilestonesToGitea,
}
)
).rejects.toThrow("Repository non-mirror-repo is not a mirror. Cannot sync.");
@@ -470,6 +512,10 @@ describe("Enhanced Gitea Operations", () => {
{
getGiteaRepoOwnerAsync: mockGetGiteaRepoOwnerAsync,
mirrorGitHubReleasesToGitea: mockMirrorGitHubReleasesToGitea,
mirrorGitRepoIssuesToGitea: mockMirrorGitRepoIssuesToGitea,
mirrorGitRepoPullRequestsToGitea: mockMirrorGitRepoPullRequestsToGitea,
mirrorGitRepoLabelsToGitea: mockMirrorGitRepoLabelsToGitea,
mirrorGitRepoMilestonesToGitea: mockMirrorGitRepoMilestonesToGitea,
}
);
@@ -482,6 +528,130 @@ describe("Enhanced Gitea Operations", () => {
expect(releaseCall.config.githubConfig?.token).toBe("github-token");
expect(releaseCall.octokit).toBeDefined();
});
test("mirrors metadata components when enabled and not previously synced", async () => {
const config: Partial<Config> = {
userId: "user123",
githubConfig: {
username: "testuser",
token: "github-token",
privateRepositories: true,
mirrorStarred: false,
},
giteaConfig: {
url: "https://gitea.example.com",
token: "encrypted-token",
defaultOwner: "testuser",
mirrorReleases: true,
mirrorMetadata: true,
mirrorIssues: true,
mirrorPullRequests: true,
mirrorLabels: true,
mirrorMilestones: true,
},
};
const repository: Repository = {
id: "repo789",
name: "metadata-repo",
fullName: "user/metadata-repo",
owner: "user",
cloneUrl: "https://github.com/user/metadata-repo.git",
isPrivate: false,
isStarred: false,
status: repoStatusEnum.parse("mirrored"),
visibility: "public",
userId: "user123",
createdAt: new Date(),
updatedAt: new Date(),
metadata: null,
};
await syncGiteaRepoEnhanced(
{ config, repository },
{
getGiteaRepoOwnerAsync: mockGetGiteaRepoOwnerAsync,
mirrorGitHubReleasesToGitea: mockMirrorGitHubReleasesToGitea,
mirrorGitRepoIssuesToGitea: mockMirrorGitRepoIssuesToGitea,
mirrorGitRepoPullRequestsToGitea: mockMirrorGitRepoPullRequestsToGitea,
mirrorGitRepoLabelsToGitea: mockMirrorGitRepoLabelsToGitea,
mirrorGitRepoMilestonesToGitea: mockMirrorGitRepoMilestonesToGitea,
}
);
expect(mockMirrorGitHubReleasesToGitea).toHaveBeenCalledTimes(1);
expect(mockMirrorGitRepoIssuesToGitea).toHaveBeenCalledTimes(1);
expect(mockMirrorGitRepoPullRequestsToGitea).toHaveBeenCalledTimes(1);
expect(mockMirrorGitRepoMilestonesToGitea).toHaveBeenCalledTimes(1);
// Labels should be skipped because issues already import them
expect(mockMirrorGitRepoLabelsToGitea).not.toHaveBeenCalled();
});
test("skips metadata mirroring when components already synced", async () => {
const config: Partial<Config> = {
userId: "user123",
githubConfig: {
username: "testuser",
token: "github-token",
privateRepositories: true,
mirrorStarred: false,
},
giteaConfig: {
url: "https://gitea.example.com",
token: "encrypted-token",
defaultOwner: "testuser",
mirrorReleases: false,
mirrorMetadata: true,
mirrorIssues: true,
mirrorPullRequests: true,
mirrorLabels: true,
mirrorMilestones: true,
},
};
const repository: Repository = {
id: "repo790",
name: "already-synced-repo",
fullName: "user/already-synced-repo",
owner: "user",
cloneUrl: "https://github.com/user/already-synced-repo.git",
isPrivate: false,
isStarred: false,
status: repoStatusEnum.parse("mirrored"),
visibility: "public",
userId: "user123",
createdAt: new Date(),
updatedAt: new Date(),
metadata: JSON.stringify({
components: {
releases: true,
issues: true,
pullRequests: true,
labels: true,
milestones: true,
},
lastSyncedAt: new Date().toISOString(),
}),
};
await syncGiteaRepoEnhanced(
{ config, repository },
{
getGiteaRepoOwnerAsync: mockGetGiteaRepoOwnerAsync,
mirrorGitHubReleasesToGitea: mockMirrorGitHubReleasesToGitea,
mirrorGitRepoIssuesToGitea: mockMirrorGitRepoIssuesToGitea,
mirrorGitRepoPullRequestsToGitea: mockMirrorGitRepoPullRequestsToGitea,
mirrorGitRepoLabelsToGitea: mockMirrorGitRepoLabelsToGitea,
mirrorGitRepoMilestonesToGitea: mockMirrorGitRepoMilestonesToGitea,
}
);
expect(mockMirrorGitHubReleasesToGitea).not.toHaveBeenCalled();
expect(mockMirrorGitRepoIssuesToGitea).not.toHaveBeenCalled();
expect(mockMirrorGitRepoPullRequestsToGitea).not.toHaveBeenCalled();
expect(mockMirrorGitRepoLabelsToGitea).not.toHaveBeenCalled();
expect(mockMirrorGitRepoMilestonesToGitea).not.toHaveBeenCalled();
});
});
describe("handleExistingNonMirrorRepo", () => {

View File

@@ -15,10 +15,18 @@ import { httpPost, httpGet, httpPatch, HttpError } from "./http-client";
import { db, repositories } from "./db";
import { eq } from "drizzle-orm";
import { repoStatusEnum } from "@/types/Repository";
import {
parseRepositoryMetadataState,
serializeRepositoryMetadataState,
} from "./metadata-state";
type SyncDependencies = {
getGiteaRepoOwnerAsync: typeof import("./gitea")["getGiteaRepoOwnerAsync"];
mirrorGitHubReleasesToGitea: typeof import("./gitea")["mirrorGitHubReleasesToGitea"];
mirrorGitRepoIssuesToGitea: typeof import("./gitea")["mirrorGitRepoIssuesToGitea"];
mirrorGitRepoPullRequestsToGitea: typeof import("./gitea")["mirrorGitRepoPullRequestsToGitea"];
mirrorGitRepoLabelsToGitea: typeof import("./gitea")["mirrorGitRepoLabelsToGitea"];
mirrorGitRepoMilestonesToGitea: typeof import("./gitea")["mirrorGitRepoMilestonesToGitea"];
};
/**
@@ -330,36 +338,236 @@ export async function syncGiteaRepoEnhanced({
Authorization: `token ${decryptedConfig.giteaConfig.token}`,
});
const metadataState = parseRepositoryMetadataState(repository.metadata);
let metadataUpdated = false;
const skipMetadataForStarred =
repository.isStarred && config.githubConfig?.starredCodeOnly;
let metadataOctokit: Octokit | null = null;
const ensureOctokit = (): Octokit | null => {
if (metadataOctokit) {
return metadataOctokit;
}
if (!decryptedConfig.githubConfig?.token) {
return null;
}
metadataOctokit = new Octokit({
auth: decryptedConfig.githubConfig.token,
});
return metadataOctokit;
};
const shouldMirrorReleases =
decryptedConfig.giteaConfig?.mirrorReleases &&
!(repository.isStarred && decryptedConfig.githubConfig?.starredCodeOnly);
!!config.giteaConfig?.mirrorReleases && !skipMetadataForStarred;
const shouldMirrorIssuesThisRun =
!!config.giteaConfig?.mirrorIssues &&
!skipMetadataForStarred &&
!metadataState.components.issues;
const shouldMirrorPullRequests =
!!config.giteaConfig?.mirrorPullRequests &&
!skipMetadataForStarred &&
!metadataState.components.pullRequests;
const shouldMirrorLabels =
!!config.giteaConfig?.mirrorLabels &&
!skipMetadataForStarred &&
!shouldMirrorIssuesThisRun &&
!metadataState.components.labels;
const shouldMirrorMilestones =
!!config.giteaConfig?.mirrorMilestones &&
!skipMetadataForStarred &&
!metadataState.components.milestones;
if (shouldMirrorReleases) {
if (!decryptedConfig.githubConfig?.token) {
const octokit = ensureOctokit();
if (!octokit) {
console.warn(
`[Sync] Skipping release mirroring for ${repository.name}: Missing GitHub token`
);
} else {
try {
const octokit = new Octokit({ auth: decryptedConfig.githubConfig.token });
await dependencies.mirrorGitHubReleasesToGitea({
config: decryptedConfig,
config,
octokit,
repository,
giteaOwner: repoOwner,
giteaRepoName: repository.name,
});
console.log(`[Sync] Mirrored releases for ${repository.name} after sync`);
metadataState.components.releases = true;
metadataUpdated = true;
console.log(
`[Sync] Mirrored releases for ${repository.name} after sync`
);
} catch (releaseError) {
console.error(
`[Sync] Failed to mirror releases for ${repository.name}: ${
releaseError instanceof Error ? releaseError.message : String(releaseError)
releaseError instanceof Error
? releaseError.message
: String(releaseError)
}`
);
}
}
}
if (shouldMirrorIssuesThisRun) {
const octokit = ensureOctokit();
if (!octokit) {
console.warn(
`[Sync] Skipping issue mirroring for ${repository.name}: Missing GitHub token`
);
} else {
try {
await dependencies.mirrorGitRepoIssuesToGitea({
config,
octokit,
repository,
giteaOwner: repoOwner,
giteaRepoName: repository.name,
});
metadataState.components.issues = true;
metadataState.components.labels = true;
metadataUpdated = true;
console.log(
`[Sync] Mirrored issues for ${repository.name} after sync`
);
} catch (issueError) {
console.error(
`[Sync] Failed to mirror issues for ${repository.name}: ${
issueError instanceof Error
? issueError.message
: String(issueError)
}`
);
}
}
} else if (
config.giteaConfig?.mirrorIssues &&
metadataState.components.issues
) {
console.log(
`[Sync] Issues already mirrored for ${repository.name}; skipping`
);
}
if (shouldMirrorPullRequests) {
const octokit = ensureOctokit();
if (!octokit) {
console.warn(
`[Sync] Skipping pull request mirroring for ${repository.name}: Missing GitHub token`
);
} else {
try {
await dependencies.mirrorGitRepoPullRequestsToGitea({
config,
octokit,
repository,
giteaOwner: repoOwner,
giteaRepoName: repository.name,
});
metadataState.components.pullRequests = true;
metadataUpdated = true;
console.log(
`[Sync] Mirrored pull requests for ${repository.name} after sync`
);
} catch (prError) {
console.error(
`[Sync] Failed to mirror pull requests for ${repository.name}: ${
prError instanceof Error ? prError.message : String(prError)
}`
);
}
}
} else if (
config.giteaConfig?.mirrorPullRequests &&
metadataState.components.pullRequests
) {
console.log(
`[Sync] Pull requests already mirrored for ${repository.name}; skipping`
);
}
if (shouldMirrorLabels) {
const octokit = ensureOctokit();
if (!octokit) {
console.warn(
`[Sync] Skipping label mirroring for ${repository.name}: Missing GitHub token`
);
} else {
try {
await dependencies.mirrorGitRepoLabelsToGitea({
config,
octokit,
repository,
giteaOwner: repoOwner,
giteaRepoName: repository.name,
});
metadataState.components.labels = true;
metadataUpdated = true;
console.log(
`[Sync] Mirrored labels for ${repository.name} after sync`
);
} catch (labelError) {
console.error(
`[Sync] Failed to mirror labels for ${repository.name}: ${
labelError instanceof Error
? labelError.message
: String(labelError)
}`
);
}
}
} else if (
config.giteaConfig?.mirrorLabels &&
metadataState.components.labels
) {
console.log(
`[Sync] Labels already mirrored for ${repository.name}; skipping`
);
}
if (shouldMirrorMilestones) {
const octokit = ensureOctokit();
if (!octokit) {
console.warn(
`[Sync] Skipping milestone mirroring for ${repository.name}: Missing GitHub token`
);
} else {
try {
await dependencies.mirrorGitRepoMilestonesToGitea({
config,
octokit,
repository,
giteaOwner: repoOwner,
giteaRepoName: repository.name,
});
metadataState.components.milestones = true;
metadataUpdated = true;
console.log(
`[Sync] Mirrored milestones for ${repository.name} after sync`
);
} catch (milestoneError) {
console.error(
`[Sync] Failed to mirror milestones for ${repository.name}: ${
milestoneError instanceof Error
? milestoneError.message
: String(milestoneError)
}`
);
}
}
} else if (
config.giteaConfig?.mirrorMilestones &&
metadataState.components.milestones
) {
console.log(
`[Sync] Milestones already mirrored for ${repository.name}; skipping`
);
}
if (metadataUpdated) {
metadataState.lastSyncedAt = new Date().toISOString();
}
// Mark repo as "synced" in DB
await db
.update(repositories)
@@ -369,6 +577,9 @@ export async function syncGiteaRepoEnhanced({
lastMirrored: new Date(),
errorMessage: null,
mirroredLocation: `${repoOwner}/${repository.name}`,
metadata: metadataUpdated
? serializeRepositoryMetadataState(metadataState)
: repository.metadata ?? null,
})
.where(eq(repositories.id, repository.id!));
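Note: the parseRepositoryMetadataState / serializeRepositoryMetadataState helpers imported from "./metadata-state" are not included in this diff. A minimal sketch of the shape their usage here implies (per-component boolean flags plus a lastSyncedAt timestamp, falling back to all-false when the stored JSON is missing or corrupt); the real module may track additional state.

// Hypothetical reconstruction of src/lib/metadata-state.ts based on its usage in this diff.
export interface RepositoryMetadataState {
  components: {
    releases: boolean;
    issues: boolean;
    pullRequests: boolean;
    labels: boolean;
    milestones: boolean;
  };
  lastSyncedAt?: string; // ISO timestamp, set once any component flips to true
}

const emptyState = (): RepositoryMetadataState => ({
  components: {
    releases: false,
    issues: false,
    pullRequests: false,
    labels: false,
    milestones: false,
  },
});

export function parseRepositoryMetadataState(
  raw: string | null | undefined
): RepositoryMetadataState {
  if (!raw) return emptyState();
  try {
    const parsed = JSON.parse(raw);
    return {
      components: { ...emptyState().components, ...(parsed?.components ?? {}) },
      lastSyncedAt: parsed?.lastSyncedAt,
    };
  } catch {
    // Corrupt metadata falls back to a clean slate so the next sync can rebuild it.
    return emptyState();
  }
}

export function serializeRepositoryMetadataState(
  state: RepositoryMetadataState
): string {
  return JSON.stringify(state);
}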

View File

@@ -13,6 +13,10 @@ import { db, organizations, repositories } from "./db";
import { eq, and } from "drizzle-orm";
import { decryptConfigTokens } from "./utils/config-encryption";
import { formatDateShort } from "./utils";
import {
parseRepositoryMetadataState,
serializeRepositoryMetadataState,
} from "./metadata-state";
/**
* Helper function to get organization configuration including destination override
@@ -587,12 +591,18 @@ export const mirrorGithubRepoToGitea = async ({
}
);
//mirror releases
// Skip releases for starred repos if starredCodeOnly is enabled
const shouldMirrorReleases = config.giteaConfig?.mirrorReleases &&
!(repository.isStarred && config.githubConfig?.starredCodeOnly);
const metadataState = parseRepositoryMetadataState(repository.metadata);
let metadataUpdated = false;
const skipMetadataForStarred =
repository.isStarred && config.githubConfig?.starredCodeOnly;
console.log(`[Metadata] Release mirroring check: mirrorReleases=${config.giteaConfig?.mirrorReleases}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorReleases=${shouldMirrorReleases}`);
// Mirror releases if enabled (always allowed to rerun for updates)
const shouldMirrorReleases =
!!config.giteaConfig?.mirrorReleases && !skipMetadataForStarred;
console.log(
`[Metadata] Release mirroring check: mirrorReleases=${config.giteaConfig?.mirrorReleases}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorReleases=${shouldMirrorReleases}`
);
if (shouldMirrorReleases) {
try {
@@ -603,21 +613,32 @@ export const mirrorGithubRepoToGitea = async ({
giteaOwner: repoOwner,
giteaRepoName: targetRepoName,
});
console.log(`[Metadata] Successfully mirrored releases for ${repository.name}`);
metadataState.components.releases = true;
metadataUpdated = true;
console.log(
`[Metadata] Successfully mirrored releases for ${repository.name}`
);
} catch (error) {
console.error(`[Metadata] Failed to mirror releases for ${repository.name}: ${error instanceof Error ? error.message : String(error)}`);
console.error(
`[Metadata] Failed to mirror releases for ${repository.name}: ${
error instanceof Error ? error.message : String(error)
}`
);
// Continue with other operations even if releases fail
}
}
// clone issues
// Skip issues for starred repos if starredCodeOnly is enabled
const shouldMirrorIssues = config.giteaConfig?.mirrorIssues &&
!(repository.isStarred && config.githubConfig?.starredCodeOnly);
// Determine metadata operations to avoid duplicates
const shouldMirrorIssuesThisRun =
!!config.giteaConfig?.mirrorIssues &&
!skipMetadataForStarred &&
!metadataState.components.issues;
console.log(`[Metadata] Issue mirroring check: mirrorIssues=${config.giteaConfig?.mirrorIssues}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorIssues=${shouldMirrorIssues}`);
console.log(
`[Metadata] Issue mirroring check: mirrorIssues=${config.giteaConfig?.mirrorIssues}, alreadyMirrored=${metadataState.components.issues}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorIssues=${shouldMirrorIssuesThisRun}`
);
if (shouldMirrorIssues) {
if (shouldMirrorIssuesThisRun) {
try {
await mirrorGitRepoIssuesToGitea({
config,
@@ -626,19 +647,34 @@ export const mirrorGithubRepoToGitea = async ({
giteaOwner: repoOwner,
giteaRepoName: targetRepoName,
});
console.log(`[Metadata] Successfully mirrored issues for ${repository.name}`);
metadataState.components.issues = true;
metadataState.components.labels = true;
metadataUpdated = true;
console.log(
`[Metadata] Successfully mirrored issues for ${repository.name}`
);
} catch (error) {
console.error(`[Metadata] Failed to mirror issues for ${repository.name}: ${error instanceof Error ? error.message : String(error)}`);
console.error(
`[Metadata] Failed to mirror issues for ${repository.name}: ${
error instanceof Error ? error.message : String(error)
}`
);
// Continue with other metadata operations even if issues fail
}
} else if (config.giteaConfig?.mirrorIssues && metadataState.components.issues) {
console.log(
`[Metadata] Issues already mirrored for ${repository.name}; skipping to avoid duplicates`
);
}
// Mirror pull requests if enabled
// Skip pull requests for starred repos if starredCodeOnly is enabled
const shouldMirrorPullRequests = config.giteaConfig?.mirrorPullRequests &&
!(repository.isStarred && config.githubConfig?.starredCodeOnly);
const shouldMirrorPullRequests =
!!config.giteaConfig?.mirrorPullRequests &&
!skipMetadataForStarred &&
!metadataState.components.pullRequests;
console.log(`[Metadata] Pull request mirroring check: mirrorPullRequests=${config.giteaConfig?.mirrorPullRequests}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorPullRequests=${shouldMirrorPullRequests}`);
console.log(
`[Metadata] Pull request mirroring check: mirrorPullRequests=${config.giteaConfig?.mirrorPullRequests}, alreadyMirrored=${metadataState.components.pullRequests}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorPullRequests=${shouldMirrorPullRequests}`
);
if (shouldMirrorPullRequests) {
try {
@@ -649,19 +685,37 @@ export const mirrorGithubRepoToGitea = async ({
giteaOwner: repoOwner,
giteaRepoName: targetRepoName,
});
console.log(`[Metadata] Successfully mirrored pull requests for ${repository.name}`);
metadataState.components.pullRequests = true;
metadataUpdated = true;
console.log(
`[Metadata] Successfully mirrored pull requests for ${repository.name}`
);
} catch (error) {
console.error(`[Metadata] Failed to mirror pull requests for ${repository.name}: ${error instanceof Error ? error.message : String(error)}`);
console.error(
`[Metadata] Failed to mirror pull requests for ${repository.name}: ${
error instanceof Error ? error.message : String(error)
}`
);
// Continue with other metadata operations even if PRs fail
}
} else if (
config.giteaConfig?.mirrorPullRequests &&
metadataState.components.pullRequests
) {
console.log(
`[Metadata] Pull requests already mirrored for ${repository.name}; skipping`
);
}
// Mirror labels if enabled (and not already done via issues)
// Skip labels for starred repos if starredCodeOnly is enabled
const shouldMirrorLabels = config.giteaConfig?.mirrorLabels && !shouldMirrorIssues &&
!(repository.isStarred && config.githubConfig?.starredCodeOnly);
const shouldMirrorLabels =
!!config.giteaConfig?.mirrorLabels &&
!skipMetadataForStarred &&
!shouldMirrorIssuesThisRun &&
!metadataState.components.labels;
console.log(`[Metadata] Label mirroring check: mirrorLabels=${config.giteaConfig?.mirrorLabels}, shouldMirrorIssues=${shouldMirrorIssues}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorLabels=${shouldMirrorLabels}`);
console.log(
`[Metadata] Label mirroring check: mirrorLabels=${config.giteaConfig?.mirrorLabels}, alreadyMirrored=${metadataState.components.labels}, issuesRunning=${shouldMirrorIssuesThisRun}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorLabels=${shouldMirrorLabels}`
);
if (shouldMirrorLabels) {
try {
@@ -672,19 +726,33 @@ export const mirrorGithubRepoToGitea = async ({
giteaOwner: repoOwner,
giteaRepoName: targetRepoName,
});
console.log(`[Metadata] Successfully mirrored labels for ${repository.name}`);
metadataState.components.labels = true;
metadataUpdated = true;
console.log(
`[Metadata] Successfully mirrored labels for ${repository.name}`
);
} catch (error) {
console.error(`[Metadata] Failed to mirror labels for ${repository.name}: ${error instanceof Error ? error.message : String(error)}`);
console.error(
`[Metadata] Failed to mirror labels for ${repository.name}: ${
error instanceof Error ? error.message : String(error)
}`
);
// Continue with other metadata operations even if labels fail
}
} else if (config.giteaConfig?.mirrorLabels && metadataState.components.labels) {
console.log(
`[Metadata] Labels already mirrored for ${repository.name}; skipping`
);
}
// Mirror milestones if enabled
// Skip milestones for starred repos if starredCodeOnly is enabled
const shouldMirrorMilestones = config.giteaConfig?.mirrorMilestones &&
!(repository.isStarred && config.githubConfig?.starredCodeOnly);
const shouldMirrorMilestones =
!!config.giteaConfig?.mirrorMilestones &&
!skipMetadataForStarred &&
!metadataState.components.milestones;
console.log(`[Metadata] Milestone mirroring check: mirrorMilestones=${config.giteaConfig?.mirrorMilestones}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorMilestones=${shouldMirrorMilestones}`);
console.log(
`[Metadata] Milestone mirroring check: mirrorMilestones=${config.giteaConfig?.mirrorMilestones}, alreadyMirrored=${metadataState.components.milestones}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorMilestones=${shouldMirrorMilestones}`
);
if (shouldMirrorMilestones) {
try {
@@ -695,11 +763,30 @@ export const mirrorGithubRepoToGitea = async ({
giteaOwner: repoOwner,
giteaRepoName: targetRepoName,
});
console.log(`[Metadata] Successfully mirrored milestones for ${repository.name}`);
metadataState.components.milestones = true;
metadataUpdated = true;
console.log(
`[Metadata] Successfully mirrored milestones for ${repository.name}`
);
} catch (error) {
console.error(`[Metadata] Failed to mirror milestones for ${repository.name}: ${error instanceof Error ? error.message : String(error)}`);
console.error(
`[Metadata] Failed to mirror milestones for ${repository.name}: ${
error instanceof Error ? error.message : String(error)
}`
);
// Continue with other metadata operations even if milestones fail
}
} else if (
config.giteaConfig?.mirrorMilestones &&
metadataState.components.milestones
) {
console.log(
`[Metadata] Milestones already mirrored for ${repository.name}; skipping`
);
}
if (metadataUpdated) {
metadataState.lastSyncedAt = new Date().toISOString();
}
console.log(`Repository ${repository.name} mirrored successfully as ${targetRepoName}`);
@@ -713,6 +800,9 @@ export const mirrorGithubRepoToGitea = async ({
lastMirrored: new Date(),
errorMessage: null,
mirroredLocation: `${repoOwner}/${targetRepoName}`,
metadata: metadataUpdated
? serializeRepositoryMetadataState(metadataState)
: repository.metadata ?? null,
})
.where(eq(repositories.id, repository.id!));
@@ -1053,12 +1143,17 @@ export async function mirrorGitHubRepoToGiteaOrg({
}
);
//mirror releases
// Skip releases for starred repos if starredCodeOnly is enabled
const shouldMirrorReleases = config.giteaConfig?.mirrorReleases &&
!(repository.isStarred && config.githubConfig?.starredCodeOnly);
const metadataState = parseRepositoryMetadataState(repository.metadata);
let metadataUpdated = false;
const skipMetadataForStarred =
repository.isStarred && config.githubConfig?.starredCodeOnly;
console.log(`[Metadata] Release mirroring check: mirrorReleases=${config.giteaConfig?.mirrorReleases}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorReleases=${shouldMirrorReleases}`);
const shouldMirrorReleases =
!!config.giteaConfig?.mirrorReleases && !skipMetadataForStarred;
console.log(
`[Metadata] Release mirroring check: mirrorReleases=${config.giteaConfig?.mirrorReleases}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorReleases=${shouldMirrorReleases}`
);
if (shouldMirrorReleases) {
try {
@@ -1069,21 +1164,31 @@ export async function mirrorGitHubRepoToGiteaOrg({
giteaOwner: orgName,
giteaRepoName: targetRepoName,
});
console.log(`[Metadata] Successfully mirrored releases for ${repository.name}`);
metadataState.components.releases = true;
metadataUpdated = true;
console.log(
`[Metadata] Successfully mirrored releases for ${repository.name}`
);
} catch (error) {
console.error(`[Metadata] Failed to mirror releases for ${repository.name}: ${error instanceof Error ? error.message : String(error)}`);
console.error(
`[Metadata] Failed to mirror releases for ${repository.name}: ${
error instanceof Error ? error.message : String(error)
}`
);
// Continue with other operations even if releases fail
}
}
// Clone issues
// Skip issues for starred repos if starredCodeOnly is enabled
const shouldMirrorIssues = config.giteaConfig?.mirrorIssues &&
!(repository.isStarred && config.githubConfig?.starredCodeOnly);
const shouldMirrorIssuesThisRun =
!!config.giteaConfig?.mirrorIssues &&
!skipMetadataForStarred &&
!metadataState.components.issues;
console.log(`[Metadata] Issue mirroring check: mirrorIssues=${config.giteaConfig?.mirrorIssues}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorIssues=${shouldMirrorIssues}`);
console.log(
`[Metadata] Issue mirroring check: mirrorIssues=${config.giteaConfig?.mirrorIssues}, alreadyMirrored=${metadataState.components.issues}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorIssues=${shouldMirrorIssuesThisRun}`
);
if (shouldMirrorIssues) {
if (shouldMirrorIssuesThisRun) {
try {
await mirrorGitRepoIssuesToGitea({
config,
@@ -1092,19 +1197,37 @@ export async function mirrorGitHubRepoToGiteaOrg({
giteaOwner: orgName,
giteaRepoName: targetRepoName,
});
console.log(`[Metadata] Successfully mirrored issues for ${repository.name} to org ${orgName}/${targetRepoName}`);
metadataState.components.issues = true;
metadataState.components.labels = true;
metadataUpdated = true;
console.log(
`[Metadata] Successfully mirrored issues for ${repository.name} to org ${orgName}/${targetRepoName}`
);
} catch (error) {
console.error(`[Metadata] Failed to mirror issues for ${repository.name} to org ${orgName}/${targetRepoName}: ${error instanceof Error ? error.message : String(error)}`);
console.error(
`[Metadata] Failed to mirror issues for ${repository.name} to org ${orgName}/${targetRepoName}: ${
error instanceof Error ? error.message : String(error)
}`
);
// Continue with other metadata operations even if issues fail
}
} else if (
config.giteaConfig?.mirrorIssues &&
metadataState.components.issues
) {
console.log(
`[Metadata] Issues already mirrored for ${repository.name}; skipping`
);
}
// Mirror pull requests if enabled
// Skip pull requests for starred repos if starredCodeOnly is enabled
const shouldMirrorPullRequests = config.giteaConfig?.mirrorPullRequests &&
!(repository.isStarred && config.githubConfig?.starredCodeOnly);
const shouldMirrorPullRequests =
!!config.giteaConfig?.mirrorPullRequests &&
!skipMetadataForStarred &&
!metadataState.components.pullRequests;
console.log(`[Metadata] Pull request mirroring check: mirrorPullRequests=${config.giteaConfig?.mirrorPullRequests}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorPullRequests=${shouldMirrorPullRequests}`);
console.log(
`[Metadata] Pull request mirroring check: mirrorPullRequests=${config.giteaConfig?.mirrorPullRequests}, alreadyMirrored=${metadataState.components.pullRequests}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorPullRequests=${shouldMirrorPullRequests}`
);
if (shouldMirrorPullRequests) {
try {
@@ -1115,19 +1238,37 @@ export async function mirrorGitHubRepoToGiteaOrg({
giteaOwner: orgName,
giteaRepoName: targetRepoName,
});
console.log(`[Metadata] Successfully mirrored pull requests for ${repository.name} to org ${orgName}/${targetRepoName}`);
metadataState.components.pullRequests = true;
metadataUpdated = true;
console.log(
`[Metadata] Successfully mirrored pull requests for ${repository.name} to org ${orgName}/${targetRepoName}`
);
} catch (error) {
console.error(`[Metadata] Failed to mirror pull requests for ${repository.name} to org ${orgName}/${targetRepoName}: ${error instanceof Error ? error.message : String(error)}`);
console.error(
`[Metadata] Failed to mirror pull requests for ${repository.name} to org ${orgName}/${targetRepoName}: ${
error instanceof Error ? error.message : String(error)
}`
);
// Continue with other metadata operations even if PRs fail
}
} else if (
config.giteaConfig?.mirrorPullRequests &&
metadataState.components.pullRequests
) {
console.log(
`[Metadata] Pull requests already mirrored for ${repository.name}; skipping`
);
}
// Mirror labels if enabled (and not already done via issues)
// Skip labels for starred repos if starredCodeOnly is enabled
const shouldMirrorLabels = config.giteaConfig?.mirrorLabels && !shouldMirrorIssues &&
!(repository.isStarred && config.githubConfig?.starredCodeOnly);
const shouldMirrorLabels =
!!config.giteaConfig?.mirrorLabels &&
!skipMetadataForStarred &&
!shouldMirrorIssuesThisRun &&
!metadataState.components.labels;
console.log(`[Metadata] Label mirroring check: mirrorLabels=${config.giteaConfig?.mirrorLabels}, shouldMirrorIssues=${shouldMirrorIssues}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorLabels=${shouldMirrorLabels}`);
console.log(
`[Metadata] Label mirroring check: mirrorLabels=${config.giteaConfig?.mirrorLabels}, alreadyMirrored=${metadataState.components.labels}, issuesRunning=${shouldMirrorIssuesThisRun}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorLabels=${shouldMirrorLabels}`
);
if (shouldMirrorLabels) {
try {
@@ -1138,19 +1279,36 @@ export async function mirrorGitHubRepoToGiteaOrg({
giteaOwner: orgName,
giteaRepoName: targetRepoName,
});
console.log(`[Metadata] Successfully mirrored labels for ${repository.name} to org ${orgName}/${targetRepoName}`);
metadataState.components.labels = true;
metadataUpdated = true;
console.log(
`[Metadata] Successfully mirrored labels for ${repository.name} to org ${orgName}/${targetRepoName}`
);
} catch (error) {
console.error(`[Metadata] Failed to mirror labels for ${repository.name} to org ${orgName}/${targetRepoName}: ${error instanceof Error ? error.message : String(error)}`);
console.error(
`[Metadata] Failed to mirror labels for ${repository.name} to org ${orgName}/${targetRepoName}: ${
error instanceof Error ? error.message : String(error)
}`
);
// Continue with other metadata operations even if labels fail
}
} else if (
config.giteaConfig?.mirrorLabels &&
metadataState.components.labels
) {
console.log(
`[Metadata] Labels already mirrored for ${repository.name}; skipping`
);
}
// Mirror milestones if enabled
// Skip milestones for starred repos if starredCodeOnly is enabled
const shouldMirrorMilestones = config.giteaConfig?.mirrorMilestones &&
!(repository.isStarred && config.githubConfig?.starredCodeOnly);
const shouldMirrorMilestones =
!!config.giteaConfig?.mirrorMilestones &&
!skipMetadataForStarred &&
!metadataState.components.milestones;
console.log(`[Metadata] Milestone mirroring check: mirrorMilestones=${config.giteaConfig?.mirrorMilestones}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorMilestones=${shouldMirrorMilestones}`);
console.log(
`[Metadata] Milestone mirroring check: mirrorMilestones=${config.giteaConfig?.mirrorMilestones}, alreadyMirrored=${metadataState.components.milestones}, isStarred=${repository.isStarred}, starredCodeOnly=${config.githubConfig?.starredCodeOnly}, shouldMirrorMilestones=${shouldMirrorMilestones}`
);
if (shouldMirrorMilestones) {
try {
@@ -1161,11 +1319,30 @@ export async function mirrorGitHubRepoToGiteaOrg({
giteaOwner: orgName,
giteaRepoName: targetRepoName,
});
console.log(`[Metadata] Successfully mirrored milestones for ${repository.name} to org ${orgName}/${targetRepoName}`);
metadataState.components.milestones = true;
metadataUpdated = true;
console.log(
`[Metadata] Successfully mirrored milestones for ${repository.name} to org ${orgName}/${targetRepoName}`
);
} catch (error) {
console.error(`[Metadata] Failed to mirror milestones for ${repository.name} to org ${orgName}/${targetRepoName}: ${error instanceof Error ? error.message : String(error)}`);
console.error(
`[Metadata] Failed to mirror milestones for ${repository.name} to org ${orgName}/${targetRepoName}: ${
error instanceof Error ? error.message : String(error)
}`
);
// Continue with other metadata operations even if milestones fail
}
} else if (
config.giteaConfig?.mirrorMilestones &&
metadataState.components.milestones
) {
console.log(
`[Metadata] Milestones already mirrored for ${repository.name}; skipping`
);
}
if (metadataUpdated) {
metadataState.lastSyncedAt = new Date().toISOString();
}
console.log(
@@ -1181,6 +1358,9 @@ export async function mirrorGitHubRepoToGiteaOrg({
lastMirrored: new Date(),
errorMessage: null,
mirroredLocation: `${orgName}/${targetRepoName}`,
metadata: metadataUpdated
? serializeRepositoryMetadataState(metadataState)
: repository.metadata ?? null,
})
.where(eq(repositories.id, repository.id!));
@@ -1812,12 +1992,20 @@ export async function mirrorGitHubReleasesToGitea({
let mirroredCount = 0;
let skippedCount = 0;
// Sort releases by created_at to ensure we get the most recent ones
const sortedReleases = releases.data.sort((a, b) =>
new Date(b.created_at).getTime() - new Date(a.created_at).getTime()
).slice(0, releaseLimit);
const getReleaseTimestamp = (release: typeof releases.data[number]) => {
const sourceDate = release.created_at ?? release.published_at ?? "";
const timestamp = sourceDate ? new Date(sourceDate).getTime() : 0;
return Number.isFinite(timestamp) ? timestamp : 0;
};
for (const release of sortedReleases) {
// Capture the latest releases, then process them oldest-to-newest so Gitea mirrors keep chronological order
const releasesToProcess = releases.data
.slice()
.sort((a, b) => getReleaseTimestamp(b) - getReleaseTimestamp(a))
.slice(0, releaseLimit)
.sort((a, b) => getReleaseTimestamp(a) - getReleaseTimestamp(b));
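As an aside, a tiny standalone sketch (illustrative values only, not part of the diff) of what this sort-slice-sort produces before the loop below walks the result oldest-first:

// Numbers stand in for release timestamps; releaseLimit = 3.
const timestamps = [5, 3, 9, 1, 7];
const picked = timestamps
  .slice()
  .sort((a, b) => b - a)   // newest first: [9, 7, 5, 3, 1]
  .slice(0, 3)             // keep the three newest: [9, 7, 5]
  .sort((a, b) => a - b);  // re-order oldest first: [5, 7, 9]
// picked === [5, 7, 9]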
for (const release of releasesToProcess) {
try {
// Check if release already exists
const existingReleasesResponse = await httpGet(

75
src/lib/metadata-state.ts Normal file
View File

@@ -0,0 +1,75 @@
interface MetadataComponentsState {
releases: boolean;
issues: boolean;
pullRequests: boolean;
labels: boolean;
milestones: boolean;
}
export interface RepositoryMetadataState {
components: MetadataComponentsState;
lastSyncedAt?: string;
}
const defaultComponents: MetadataComponentsState = {
releases: false,
issues: false,
pullRequests: false,
labels: false,
milestones: false,
};
export function createDefaultMetadataState(): RepositoryMetadataState {
return {
components: { ...defaultComponents },
};
}
export function parseRepositoryMetadataState(
raw: unknown
): RepositoryMetadataState {
const base = createDefaultMetadataState();
if (!raw) {
return base;
}
let parsed: any = raw;
if (typeof raw === "string") {
try {
parsed = JSON.parse(raw);
} catch {
return base;
}
}
if (!parsed || typeof parsed !== "object") {
return base;
}
if (parsed.components && typeof parsed.components === "object") {
base.components = {
...base.components,
releases: Boolean(parsed.components.releases),
issues: Boolean(parsed.components.issues),
pullRequests: Boolean(parsed.components.pullRequests),
labels: Boolean(parsed.components.labels),
milestones: Boolean(parsed.components.milestones),
};
}
if (typeof parsed.lastSyncedAt === "string") {
base.lastSyncedAt = parsed.lastSyncedAt;
} else if (typeof parsed.lastMetadataSync === "string") {
base.lastSyncedAt = parsed.lastMetadataSync;
}
return base;
}
export function serializeRepositoryMetadataState(
state: RepositoryMetadataState
): string {
return JSON.stringify(state);
}
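A minimal usage sketch of this module (illustrative only; the import path and the shape of the stored column value are assumptions based on the schema change above): parse the stored JSON, flip a component flag after a successful mirror, then serialize the state back for persistence.

import {
  parseRepositoryMetadataState,
  serializeRepositoryMetadataState,
} from "@/lib/metadata-state"; // path assumed from src/lib/metadata-state.ts

// Hypothetical value read from repositories.metadata (may also be null).
const stored: string | null = '{"components":{"issues":true}}';

const state = parseRepositoryMetadataState(stored);
// Missing or malformed fields fall back to the defaults (all false).
if (!state.components.releases) {
  // ...mirror releases here...
  state.components.releases = true;
  state.lastSyncedAt = new Date().toISOString();
}

// Persist the updated state back into the metadata text column.
const nextMetadata = serializeRepositoryMetadataState(state);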

View File

@@ -62,6 +62,7 @@ describe('normalizeGitRepoToInsert', () => {
expect(insert.description).toBeNull();
expect(insert.lastMirrored).toBeNull();
expect(insert.errorMessage).toBeNull();
expect(insert.normalizedFullName).toBe(repo.fullName.toLowerCase());
});
});
@@ -72,4 +73,3 @@ describe('calcBatchSizeForInsert', () => {
expect(batch * 29).toBeLessThanOrEqual(999);
});
});

View File

@@ -33,6 +33,7 @@ export function normalizeGitRepoToInsert(
configId,
name: repo.name,
fullName: repo.fullName,
normalizedFullName: repo.fullName.toLowerCase(),
url: repo.url,
cloneUrl: repo.cloneUrl,
owner: repo.owner,
@@ -68,4 +69,3 @@ export function calcBatchSizeForInsert(columnCount: number, maxParams = 999): nu
const effectiveMax = Math.max(1, maxParams - safety);
return Math.max(1, Math.floor(effectiveMax / columnCount));
}
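The lowercased normalizedFullName is what makes duplicate detection case-insensitive across the scheduler and import paths; a small standalone sketch of the idea, assuming a unique index on (userId, normalizedFullName) backs the conflict target:

// Two spellings of the same repository collapse to one normalized key.
const a = "RayLabsHQ/gitea-mirror";
const b = "raylabshq/Gitea-Mirror";

const existing = new Set([a.toLowerCase()]);
const isDuplicate = existing.has(b.toLowerCase()); // true

// The same lowercase key is used as the onConflictDoNothing target,
// so re-imports with different casing no longer create duplicate rows.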

View File

@@ -99,12 +99,12 @@ async function runScheduledSync(config: any): Promise<void> {
// Check for new repositories
const existingRepos = await db
.select({ fullName: repositories.fullName })
.select({ normalizedFullName: repositories.normalizedFullName })
.from(repositories)
.where(eq(repositories.userId, userId));
const existingRepoNames = new Set(existingRepos.map(r => r.fullName));
const newRepos = allGithubRepos.filter(r => !existingRepoNames.has(r.fullName));
const existingRepoNames = new Set(existingRepos.map(r => r.normalizedFullName));
const newRepos = allGithubRepos.filter(r => !existingRepoNames.has(r.fullName.toLowerCase()));
if (newRepos.length > 0) {
console.log(`[Scheduler] Found ${newRepos.length} new repositories for user ${userId}`);
@@ -123,7 +123,7 @@ async function runScheduledSync(config: any): Promise<void> {
await db
.insert(repositories)
.values(batch)
.onConflictDoNothing({ target: [repositories.userId, repositories.fullName] });
.onConflictDoNothing({ target: [repositories.userId, repositories.normalizedFullName] });
}
console.log(`[Scheduler] Successfully imported ${newRepos.length} new repositories for user ${userId}`);
} else {
@@ -432,12 +432,12 @@ async function performInitialAutoStart(): Promise<void> {
// Check for new repositories
const existingRepos = await db
.select({ fullName: repositories.fullName })
.select({ normalizedFullName: repositories.normalizedFullName })
.from(repositories)
.where(eq(repositories.userId, config.userId));
const existingRepoNames = new Set(existingRepos.map(r => r.fullName));
const reposToImport = allGithubRepos.filter(r => !existingRepoNames.has(r.fullName));
const existingRepoNames = new Set(existingRepos.map(r => r.normalizedFullName));
const reposToImport = allGithubRepos.filter(r => !existingRepoNames.has(r.fullName.toLowerCase()));
if (reposToImport.length > 0) {
console.log(`[Scheduler] Importing ${reposToImport.length} repositories for user ${config.userId}...`);
@@ -456,7 +456,7 @@ async function performInitialAutoStart(): Promise<void> {
await db
.insert(repositories)
.values(batch)
.onConflictDoNothing({ target: [repositories.userId, repositories.fullName] });
.onConflictDoNothing({ target: [repositories.userId, repositories.normalizedFullName] });
}
console.log(`[Scheduler] Successfully imported ${reposToImport.length} repositories`);
} else {

View File

@@ -24,6 +24,7 @@ describe("normalizeOidcProviderConfig", () => {
expect(result.oidcConfig.userInfoEndpoint).toBe("https://auth.example.com/userinfo");
expect(result.oidcConfig.scopes).toEqual(["openid", "email"]);
expect(result.oidcConfig.pkce).toBe(false);
expect(result.oidcConfig.discoveryEndpoint).toBe("https://auth.example.com/.well-known/openid-configuration");
});
it("derives missing fields from discovery", async () => {
@@ -46,6 +47,24 @@ describe("normalizeOidcProviderConfig", () => {
expect(result.oidcConfig.jwksEndpoint).toBe("https://auth.example.com/jwks");
expect(result.oidcConfig.userInfoEndpoint).toBe("https://auth.example.com/userinfo");
expect(result.oidcConfig.scopes).toEqual(["openid", "email", "profile"]);
expect(result.oidcConfig.discoveryEndpoint).toBe("https://auth.example.com/.well-known/openid-configuration");
});
it("preserves trailing slash issuers when building discovery endpoints", async () => {
const trailingIssuer = "https://auth.example.com/application/o/example/";
const requestedUrls: string[] = [];
const fetchMock: typeof fetch = async (url) => {
requestedUrls.push(typeof url === "string" ? url : url.url);
return new Response(JSON.stringify({
authorization_endpoint: "https://auth.example.com/application/o/example/auth",
token_endpoint: "https://auth.example.com/application/o/example/token",
}));
};
const result = await normalizeOidcProviderConfig(trailingIssuer, {}, fetchMock);
expect(requestedUrls[0]).toBe("https://auth.example.com/application/o/example/.well-known/openid-configuration");
expect(result.oidcConfig.discoveryEndpoint).toBe("https://auth.example.com/application/o/example/.well-known/openid-configuration");
});
it("throws for invalid issuer URL", async () => {

View File

@@ -131,18 +131,21 @@ export async function normalizeOidcProviderConfig(
throw new OidcConfigError("Issuer is required");
}
let normalizedIssuer: string;
const trimmedIssuer = issuer.trim();
try {
const issuerUrl = new URL(issuer.trim());
normalizedIssuer = issuerUrl.toString().replace(/\/$/, "");
// Validate issuer but keep caller-provided formatting so we don't break provider expectations
new URL(trimmedIssuer);
} catch {
throw new OidcConfigError(`Invalid issuer URL: ${issuer}`);
}
const issuerForDiscovery = trimmedIssuer.replace(/\/$/, "");
const discoveryEndpoint = cleanUrl(
rawConfig.discoveryEndpoint,
"discovery endpoint",
) ?? `${normalizedIssuer}/.well-known/openid-configuration`;
) ?? `${issuerForDiscovery}/.well-known/openid-configuration`;
const authorizationEndpoint = cleanUrl(rawConfig.authorizationEndpoint, "authorization endpoint");
const tokenEndpoint = cleanUrl(rawConfig.tokenEndpoint, "token endpoint");
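The effect of keeping the caller's issuer verbatim while still stripping a single trailing slash for discovery can be seen in a short sketch (Authentik-style issuer used purely as an example; error handling simplified compared to the real function):

const issuer = "https://auth.example.com/application/o/example/";
const trimmedIssuer = issuer.trim();

// Validation only; the real code wraps this in try/catch and throws OidcConfigError.
new URL(trimmedIssuer);

// Only the discovery URL drops the trailing slash, avoiding a double "//".
const issuerForDiscovery = trimmedIssuer.replace(/\/$/, "");
const discoveryEndpoint = `${issuerForDiscovery}/.well-known/openid-configuration`;
// -> https://auth.example.com/application/o/example/.well-known/openid-configuration
// The issuer itself stays "https://auth.example.com/application/o/example/".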

View File

@@ -29,12 +29,13 @@ export async function POST(context: APIContext) {
);
}
// Validate issuer URL format
// Validate issuer URL format while preserving trailing slash when provided
let validatedIssuer = issuer;
if (issuer && typeof issuer === 'string' && issuer.trim() !== '') {
try {
const issuerUrl = new URL(issuer.trim());
validatedIssuer = issuerUrl.toString().replace(/\/$/, ''); // Remove trailing slash
const trimmedIssuer = issuer.trim();
new URL(trimmedIssuer);
validatedIssuer = trimmedIssuer;
} catch (e) {
return new Response(
JSON.stringify({ error: `Invalid issuer URL format: ${issuer}` }),

View File

@@ -1,5 +1,5 @@
import type { APIRoute } from "astro";
import { db, organizations } from "@/lib/db";
import { db, organizations, repositories } from "@/lib/db";
import { eq, and } from "drizzle-orm";
import { createSecureErrorResponse } from "@/lib/utils";
import { requireAuth } from "@/lib/utils/auth-helpers";
@@ -61,3 +61,60 @@ export const PATCH: APIRoute = async (context) => {
return createSecureErrorResponse(error, "Update organization destination", 500);
}
};
export const DELETE: APIRoute = async (context) => {
try {
const { user, response } = await requireAuth(context);
if (response) return response;
const userId = user!.id;
const orgId = context.params.id;
if (!orgId) {
return new Response(
JSON.stringify({ error: "Organization ID is required" }),
{
status: 400,
headers: { "Content-Type": "application/json" },
}
);
}
const [existingOrg] = await db
.select()
.from(organizations)
.where(and(eq(organizations.id, orgId), eq(organizations.userId, userId)))
.limit(1);
if (!existingOrg) {
return new Response(
JSON.stringify({ error: "Organization not found" }),
{
status: 404,
headers: { "Content-Type": "application/json" },
}
);
}
await db.delete(repositories).where(
and(
eq(repositories.userId, userId),
eq(repositories.organization, existingOrg.name)
)
);
await db
.delete(organizations)
.where(and(eq(organizations.id, orgId), eq(organizations.userId, userId)));
return new Response(
JSON.stringify({ success: true }),
{
status: 200,
headers: { "Content-Type": "application/json" },
}
);
} catch (error) {
return createSecureErrorResponse(error, "Delete organization", 500);
}
};

View File

@@ -1,5 +1,5 @@
import type { APIRoute } from "astro";
import { db, repositories } from "@/lib/db";
import { db, repositories, mirrorJobs } from "@/lib/db";
import { eq, and } from "drizzle-orm";
import { createSecureErrorResponse } from "@/lib/utils";
import { requireAuth } from "@/lib/utils/auth-helpers";
@@ -61,3 +61,54 @@ export const PATCH: APIRoute = async (context) => {
return createSecureErrorResponse(error, "Update repository destination", 500);
}
};
export const DELETE: APIRoute = async (context) => {
try {
const { user, response } = await requireAuth(context);
if (response) return response;
const userId = user!.id;
const repoId = context.params.id;
if (!repoId) {
return new Response(JSON.stringify({ error: "Repository ID is required" }), {
status: 400,
headers: { "Content-Type": "application/json" },
});
}
const [existingRepo] = await db
.select()
.from(repositories)
.where(and(eq(repositories.id, repoId), eq(repositories.userId, userId)))
.limit(1);
if (!existingRepo) {
return new Response(
JSON.stringify({ error: "Repository not found" }),
{
status: 404,
headers: { "Content-Type": "application/json" },
}
);
}
await db
.delete(repositories)
.where(and(eq(repositories.id, repoId), eq(repositories.userId, userId)));
await db
.delete(mirrorJobs)
.where(and(eq(mirrorJobs.repositoryId, repoId), eq(mirrorJobs.userId, userId)));
return new Response(
JSON.stringify({ success: true }),
{
status: 200,
headers: { "Content-Type": "application/json" },
}
);
} catch (error) {
return createSecureErrorResponse(error, "Delete repository", 500);
}
};
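A brief client-side sketch of invoking the two DELETE handlers added above (the URL paths are assumptions inferred from the organization and repository destination routes; adjust to the actual API file layout):

// Deleting an organization also removes its repositories server-side,
// so no separate per-repo cleanup call is needed afterwards.
async function deleteOrganization(orgId: string): Promise<void> {
  const res = await fetch(`/api/organizations/${orgId}`, { method: "DELETE" }); // path assumed
  if (!res.ok) {
    const body: { error?: string } = await res.json().catch(() => ({}));
    throw new Error(body.error ?? `Delete failed with status ${res.status}`);
  }
}

// Deleting a repository also clears its mirror jobs on the server.
async function deleteRepository(repoId: string): Promise<void> {
  const res = await fetch(`/api/repositories/${repoId}`, { method: "DELETE" }); // path assumed
  if (!res.ok) {
    const body: { error?: string } = await res.json().catch(() => ({}));
    throw new Error(body.error ?? `Delete failed with status ${res.status}`);
  }
}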

View File

@@ -17,11 +17,11 @@ export async function POST(context: APIContext) {
});
}
// Validate issuer URL format
let cleanIssuer: string;
// Validate issuer URL format while keeping trailing slash if provided
const trimmedIssuer = issuer.trim();
let parsedIssuer: URL;
try {
const issuerUrl = new URL(issuer.trim());
cleanIssuer = issuerUrl.toString().replace(/\/$/, ""); // Remove trailing slash
parsedIssuer = new URL(trimmedIssuer);
} catch (e) {
return new Response(
JSON.stringify({
@@ -35,7 +35,8 @@ export async function POST(context: APIContext) {
);
}
const discoveryUrl = `${cleanIssuer}/.well-known/openid-configuration`;
const issuerForDiscovery = trimmedIssuer.replace(/\/$/, "");
const discoveryUrl = `${issuerForDiscovery}/.well-known/openid-configuration`;
try {
// Fetch OIDC discovery document with timeout
@@ -52,9 +53,9 @@ export async function POST(context: APIContext) {
});
} catch (fetchError) {
if (fetchError instanceof Error && fetchError.name === 'AbortError') {
throw new Error(`Request timeout: The OIDC provider at ${cleanIssuer} did not respond within 10 seconds`);
throw new Error(`Request timeout: The OIDC provider at ${trimmedIssuer} did not respond within 10 seconds`);
}
throw new Error(`Network error: Could not connect to ${cleanIssuer}. Please verify the URL is correct and accessible.`);
throw new Error(`Network error: Could not connect to ${trimmedIssuer}. Please verify the URL is correct and accessible.`);
} finally {
clearTimeout(timeoutId);
}
@@ -63,7 +64,7 @@ export async function POST(context: APIContext) {
if (response.status === 404) {
throw new Error(`OIDC discovery document not found at ${discoveryUrl}. For Authentik, ensure you're using the correct application slug in the URL.`);
} else if (response.status >= 500) {
throw new Error(`OIDC provider error (${response.status}): The server at ${cleanIssuer} returned an error.`);
throw new Error(`OIDC provider error (${response.status}): The server at ${trimmedIssuer} returned an error.`);
} else {
throw new Error(`Failed to fetch discovery document (${response.status}): ${response.statusText}`);
}
@@ -73,12 +74,12 @@ export async function POST(context: APIContext) {
try {
config = await response.json();
} catch (parseError) {
throw new Error(`Invalid response: The discovery document from ${cleanIssuer} is not valid JSON.`);
throw new Error(`Invalid response: The discovery document from ${trimmedIssuer} is not valid JSON.`);
}
// Extract the essential endpoints
const discoveredConfig = {
issuer: config.issuer || cleanIssuer,
issuer: config.issuer || trimmedIssuer,
authorizationEndpoint: config.authorization_endpoint,
tokenEndpoint: config.token_endpoint,
userInfoEndpoint: config.userinfo_endpoint,
@@ -88,7 +89,7 @@ export async function POST(context: APIContext) {
responseTypes: config.response_types_supported || ["code"],
grantTypes: config.grant_types_supported || ["authorization_code"],
// Suggested domain from issuer
suggestedDomain: new URL(cleanIssuer).hostname.replace("www.", ""),
suggestedDomain: parsedIssuer.hostname.replace("www.", ""),
};
return new Response(JSON.stringify(discoveredConfig), {

View File

@@ -82,11 +82,10 @@ export async function POST(context: APIContext) {
);
}
// Clean issuer URL (remove trailing slash); validate format
let cleanIssuer = issuer;
// Validate issuer URL format but keep trailing slash if provided
const trimmedIssuer = issuer.toString().trim();
try {
const issuerUrl = new URL(issuer.toString().trim());
cleanIssuer = issuerUrl.toString().replace(/\/$/, "");
new URL(trimmedIssuer);
} catch {
return new Response(
JSON.stringify({ error: `Invalid issuer URL format: ${issuer}` }),
@@ -99,7 +98,7 @@ export async function POST(context: APIContext) {
let normalized;
try {
normalized = await normalizeOidcProviderConfig(cleanIssuer, {
normalized = await normalizeOidcProviderConfig(trimmedIssuer, {
clientId,
clientSecret,
authorizationEndpoint,
@@ -134,7 +133,7 @@ export async function POST(context: APIContext) {
.insert(ssoProviders)
.values({
id: nanoid(),
issuer: cleanIssuer,
issuer: trimmedIssuer,
domain,
oidcConfig: JSON.stringify(storedOidcConfig),
userId: user.id,
@@ -213,12 +212,10 @@ export async function PUT(context: APIContext) {
// Parse existing config
const existingConfig = JSON.parse(existingProvider.oidcConfig);
const effectiveIssuer = issuer || existingProvider.issuer;
const effectiveIssuer = issuer?.toString().trim() || existingProvider.issuer;
let cleanIssuer = effectiveIssuer;
try {
const issuerUrl = new URL(effectiveIssuer.toString().trim());
cleanIssuer = issuerUrl.toString().replace(/\/$/, "");
new URL(effectiveIssuer);
} catch {
return new Response(
JSON.stringify({ error: `Invalid issuer URL format: ${effectiveIssuer}` }),
@@ -244,7 +241,7 @@ export async function PUT(context: APIContext) {
let normalized;
try {
normalized = await normalizeOidcProviderConfig(cleanIssuer, mergedConfig);
normalized = await normalizeOidcProviderConfig(effectiveIssuer, mergedConfig);
} catch (error) {
if (error instanceof OidcConfigError) {
return new Response(
@@ -266,7 +263,7 @@ export async function PUT(context: APIContext) {
const [updatedProvider] = await db
.update(ssoProviders)
.set({
issuer: cleanIssuer,
issuer: effectiveIssuer,
domain: domain || existingProvider.domain,
oidcConfig: JSON.stringify(storedOidcConfig),
organizationId: organizationId !== undefined ? organizationId : existingProvider.organizationId,

View File

@@ -66,6 +66,7 @@ export const POST: APIRoute = async ({ request }) => {
configId: config.id,
name: repo.name,
fullName: repo.fullName,
normalizedFullName: repo.fullName.toLowerCase(),
url: repo.url,
cloneUrl: repo.cloneUrl,
owner: repo.owner,
@@ -97,6 +98,7 @@ export const POST: APIRoute = async ({ request }) => {
userId,
configId: config.id,
name: org.name,
normalizedName: org.name.toLowerCase(),
avatarUrl: org.avatarUrl,
membershipRole: org.membershipRole,
isIncluded: false,
@@ -113,22 +115,22 @@ export const POST: APIRoute = async ({ request }) => {
await db.transaction(async (tx) => {
const [existingRepos, existingOrgs] = await Promise.all([
tx
.select({ fullName: repositories.fullName })
.select({ normalizedFullName: repositories.normalizedFullName })
.from(repositories)
.where(eq(repositories.userId, userId)),
tx
.select({ name: organizations.name })
.select({ normalizedName: organizations.normalizedName })
.from(organizations)
.where(eq(organizations.userId, userId)),
]);
const existingRepoNames = new Set(existingRepos.map((r) => r.fullName));
const existingOrgNames = new Set(existingOrgs.map((o) => o.name));
const existingRepoNames = new Set(existingRepos.map((r) => r.normalizedFullName));
const existingOrgNames = new Set(existingOrgs.map((o) => o.normalizedName));
insertedRepos = newRepos.filter(
(r) => !existingRepoNames.has(r.fullName)
(r) => !existingRepoNames.has(r.normalizedFullName)
);
insertedOrgs = newOrgs.filter((o) => !existingOrgNames.has(o.name));
insertedOrgs = newOrgs.filter((o) => !existingOrgNames.has(o.normalizedName));
// Batch insert repositories to avoid SQLite parameter limit (dynamic by column count)
const sample = newRepos[0];
@@ -140,7 +142,7 @@ export const POST: APIRoute = async ({ request }) => {
await tx
.insert(repositories)
.values(batch)
.onConflictDoNothing({ target: [repositories.userId, repositories.fullName] });
.onConflictDoNothing({ target: [repositories.userId, repositories.normalizedFullName] });
}
}

View File

@@ -1,5 +1,4 @@
import type { APIRoute } from "astro";
import { Octokit } from "@octokit/rest";
import { configs, db, organizations, repositories } from "@/lib/db";
import { and, eq } from "drizzle-orm";
import { jsonResponse, createSecureErrorResponse } from "@/lib/utils";
@@ -15,7 +14,7 @@ import { createGitHubClient } from "@/lib/github";
export const POST: APIRoute = async ({ request }) => {
try {
const body: AddOrganizationApiRequest = await request.json();
const { role, org, userId } = body;
const { role, org, userId, force = false } = body;
if (!org || !userId || !role) {
return jsonResponse({
@@ -24,21 +23,58 @@ export const POST: APIRoute = async ({ request }) => {
});
}
// Check if org already exists
const existingOrg = await db
const trimmedOrg = org.trim();
const normalizedOrg = trimmedOrg.toLowerCase();
// Check if org already exists (case-insensitive)
const [existingOrg] = await db
.select()
.from(organizations)
.where(
and(eq(organizations.name, org), eq(organizations.userId, userId))
);
and(
eq(organizations.userId, userId),
eq(organizations.normalizedName, normalizedOrg)
)
)
.limit(1);
if (existingOrg.length > 0) {
if (existingOrg && !force) {
return jsonResponse({
data: {
success: false,
error: "Organization already exists for this user",
},
status: 400,
status: 409,
});
}
if (existingOrg && force) {
const [updatedOrg] = await db
.update(organizations)
.set({
membershipRole: role,
normalizedName: normalizedOrg,
updatedAt: new Date(),
})
.where(eq(organizations.id, existingOrg.id))
.returning();
const resPayload: AddOrganizationApiResponse = {
success: true,
organization: updatedOrg ?? existingOrg,
message: "Organization already exists; using existing record.",
};
return jsonResponse({ data: resPayload, status: 200 });
}
if (existingOrg) {
return jsonResponse({
data: {
success: false,
error: "Organization already exists for this user",
},
status: 409,
});
}
@@ -71,17 +107,21 @@ export const POST: APIRoute = async ({ request }) => {
// Create authenticated Octokit instance with rate limit tracking
const githubUsername = decryptedConfig.githubConfig?.owner || undefined;
const octokit = createGitHubClient(decryptedConfig.githubConfig.token, userId, githubUsername);
const octokit = createGitHubClient(
decryptedConfig.githubConfig.token,
userId,
githubUsername
);
// Fetch org metadata
const { data: orgData } = await octokit.orgs.get({ org });
const { data: orgData } = await octokit.orgs.get({ org: trimmedOrg });
// Fetch repos based on config settings
const allRepos = [];
// Fetch all repos (public, private, and member) to show in UI
const publicRepos = await octokit.paginate(octokit.repos.listForOrg, {
org,
org: trimmedOrg,
type: "public",
per_page: 100,
});
@@ -89,7 +129,7 @@ export const POST: APIRoute = async ({ request }) => {
// Always fetch private repos to show them in the UI
const privateRepos = await octokit.paginate(octokit.repos.listForOrg, {
org,
org: trimmedOrg,
type: "private",
per_page: 100,
});
@@ -97,7 +137,7 @@ export const POST: APIRoute = async ({ request }) => {
// Also fetch member repos (includes private repos the user has access to)
const memberRepos = await octokit.paginate(octokit.repos.listForOrg, {
org,
org: trimmedOrg,
type: "member",
per_page: 100,
});
@@ -107,12 +147,17 @@ export const POST: APIRoute = async ({ request }) => {
allRepos.push(...uniqueMemberRepos);
// Insert repositories
const repoRecords = allRepos.map((repo) => ({
const repoRecords = allRepos.map((repo) => {
const normalizedOwner = repo.owner.login.trim().toLowerCase();
const normalizedRepoName = repo.name.trim().toLowerCase();
return {
id: uuidv4(),
userId,
configId,
name: repo.name,
fullName: repo.full_name,
normalizedFullName: `${normalizedOwner}/${normalizedRepoName}`,
url: repo.html_url,
cloneUrl: repo.clone_url ?? "",
owner: repo.owner.login,
@@ -138,7 +183,8 @@ export const POST: APIRoute = async ({ request }) => {
errorMessage: null,
createdAt: repo.created_at ? new Date(repo.created_at) : new Date(),
updatedAt: repo.updated_at ? new Date(repo.updated_at) : new Date(),
}));
};
});
// Batch insert repositories to avoid SQLite parameter limit
// Compute batch size based on column count
@@ -150,7 +196,7 @@ export const POST: APIRoute = async ({ request }) => {
await db
.insert(repositories)
.values(batch)
.onConflictDoNothing({ target: [repositories.userId, repositories.fullName] });
.onConflictDoNothing({ target: [repositories.userId, repositories.normalizedFullName] });
}
// Insert organization metadata
@@ -159,6 +205,7 @@ export const POST: APIRoute = async ({ request }) => {
userId,
configId,
name: orgData.login,
normalizedName: normalizedOrg,
avatarUrl: orgData.avatar_url,
membershipRole: role,
isIncluded: false,

View File

@@ -15,7 +15,7 @@ import { createMirrorJob } from "@/lib/helpers";
export const POST: APIRoute = async ({ request }) => {
try {
const body: AddRepositoriesApiRequest = await request.json();
const { owner, repo, userId } = body;
const { owner, repo, userId, force = false } = body;
if (!owner || !repo || !userId) {
return new Response(
@@ -27,26 +27,43 @@ export const POST: APIRoute = async ({ request }) => {
);
}
const trimmedOwner = owner.trim();
const trimmedRepo = repo.trim();
if (!trimmedOwner || !trimmedRepo) {
return jsonResponse({
data: {
success: false,
error: "Missing owner, repo, or userId",
},
status: 400,
});
}
const normalizedOwner = trimmedOwner.toLowerCase();
const normalizedRepo = trimmedRepo.toLowerCase();
const normalizedFullName = `${normalizedOwner}/${normalizedRepo}`;
// Check if repository with the same owner, name, and userId already exists
const existingRepo = await db
const [existingRepo] = await db
.select()
.from(repositories)
.where(
and(
eq(repositories.owner, owner),
eq(repositories.name, repo),
eq(repositories.userId, userId)
eq(repositories.userId, userId),
eq(repositories.normalizedFullName, normalizedFullName)
)
);
)
.limit(1);
if (existingRepo.length > 0) {
if (existingRepo && !force) {
return jsonResponse({
data: {
success: false,
error:
"Repository with this name and owner already exists for this user",
},
status: 400,
status: 409,
});
}
@@ -68,14 +85,17 @@ export const POST: APIRoute = async ({ request }) => {
const octokit = new Octokit(); // No auth for public repos
const { data: repoData } = await octokit.rest.repos.get({ owner, repo });
const { data: repoData } = await octokit.rest.repos.get({
owner: trimmedOwner,
repo: trimmedRepo,
});
const metadata = {
id: uuidv4(),
const baseMetadata = {
userId,
configId,
name: repoData.name,
fullName: repoData.full_name,
normalizedFullName,
url: repoData.html_url,
cloneUrl: repoData.clone_url,
owner: repoData.owner.login,
@@ -94,6 +114,37 @@ export const POST: APIRoute = async ({ request }) => {
description: repoData.description ?? null,
defaultBranch: repoData.default_branch,
visibility: (repoData.visibility ?? "public") as RepositoryVisibility,
lastMirrored: existingRepo?.lastMirrored ?? null,
errorMessage: existingRepo?.errorMessage ?? null,
mirroredLocation: existingRepo?.mirroredLocation ?? "",
destinationOrg: existingRepo?.destinationOrg ?? null,
updatedAt: repoData.updated_at
? new Date(repoData.updated_at)
: new Date(),
};
if (existingRepo && force) {
const [updatedRepo] = await db
.update(repositories)
.set({
...baseMetadata,
normalizedFullName,
configId,
})
.where(eq(repositories.id, existingRepo.id))
.returning();
const resPayload: AddRepositoriesApiResponse = {
success: true,
repository: updatedRepo ?? existingRepo,
message: "Repository already exists; metadata refreshed.",
};
return jsonResponse({ data: resPayload, status: 200 });
}
const metadata = {
id: uuidv4(),
status: "imported" as Repository["status"],
lastMirrored: null,
errorMessage: null,
@@ -102,15 +153,13 @@ export const POST: APIRoute = async ({ request }) => {
createdAt: repoData.created_at
? new Date(repoData.created_at)
: new Date(),
updatedAt: repoData.updated_at
? new Date(repoData.updated_at)
: new Date(),
};
...baseMetadata,
} satisfies Repository;
await db
.insert(repositories)
.values(metadata)
.onConflictDoNothing({ target: [repositories.userId, repositories.fullName] });
.onConflictDoNothing({ target: [repositories.userId, repositories.normalizedFullName] });
createMirrorJob({
userId,

View File

@@ -81,11 +81,12 @@ export interface AddRepositoriesApiRequest {
userId: string;
repo: string;
owner: string;
force?: boolean;
}
export interface AddRepositoriesApiResponse {
success: boolean;
message: string;
repository: Repository;
repository?: Repository;
error?: string;
}
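How a client might use the new optional force flag when re-adding an existing repository; the import path and endpoint URL are assumptions, only the payload shape comes from the request type above. Without force a duplicate now returns HTTP 409; retrying with force: true refreshes the existing record instead of failing.

import type {
  AddRepositoriesApiRequest,
  AddRepositoriesApiResponse,
} from "@/types"; // import path assumed

async function addRepository(
  req: AddRepositoriesApiRequest
): Promise<AddRepositoriesApiResponse> {
  const res = await fetch("/api/github/add-repository", { // path assumed
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(req),
  });
  return res.json();
}

const retry = await addRepository({
  userId: "user-123", // hypothetical values
  owner: "RayLabsHQ",
  repo: "gitea-mirror",
  force: true,
});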

View File

@@ -45,11 +45,12 @@ export interface AddOrganizationApiRequest {
userId: string;
org: string;
role: MembershipRole;
force?: boolean;
}
export interface AddOrganizationApiResponse {
success: boolean;
message: string;
organization: Organization;
organization?: Organization;
error?: string;
}

Binary file not shown (image; 128 KiB).