Merge pull request #11 from tcr-n/main #48

name: Deploy to Linux Server

on:
  push:
    branches: ["*"]
  pull_request:
    branches: ["*"]
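
# Both check jobs run for every push and pull request on any branch; only the
# deploy job at the bottom is restricted to main.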
jobs:
  syntax-check:
    name: Syntax check (CSV & JSON)
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v6
      - name: Check trafic.json syntax
        run: |
          python -m json.tool trafic.json
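      # Read the header row, rewind, then re-parse every row so the csv module
      # raises on any malformed line.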
      - name: Check lines_picto.csv syntax
        run: |
          python -c "import csv; f=open('lines_picto.csv', encoding='utf-8'); next(csv.reader(f, delimiter=';')); f.seek(0); [row for row in csv.reader(f, delimiter=';')]"
      - name: Check logo paths existence
        run: python -c "import csv,os;f=open('lines_picto.csv',encoding='utf-8');r=csv.DictReader(f,delimiter=';');missing=[row['logoPath'] for row in r if not os.path.exists(row['logoPath'].replace('https://clarifygdps.com/hexatransit/',''))];f.close();print('Missing logo files ({}):'.format(len(missing)));[print(' -',m) for m in missing];exit(1) if missing else None"

  gtfs-check:
    name: GTFS routes check
    runs-on: ubuntu-latest
    needs: syntax-check
    steps:
      - name: Checkout repository
        uses: actions/checkout@v6
      - name: Check GTFS routes match trafic.json
        run: |
          python - <<'PY'
          import json, sys, io, urllib.request, zipfile, csv
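
          # Assumed trafic.json shape (inferred from the parsing below): a list
          # of company objects, each carrying a 'companyId' and a 'lines' list
          # of groups, where each group is a list of dicts with a 'lineId'.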
          # Load trafic.json
          try:
              with open('trafic.json', encoding='utf-8') as f:
                  data = json.load(f)
          except Exception as e:
              print('Failed to load trafic.json:', e)
              sys.exit(1)

          agencies = {}
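          # Map each companyId to the set of lineIds declared under it.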
          for company in data:
              aid = company.get('companyId')
              if not aid:
                  continue
              line_ids = set()
              for group in company.get('lines', []) or []:
                  # each group is a list; items with 'lineId' contain lines
                  for item in group:
                      if not isinstance(item, dict):
                          continue
                      lid = item.get('lineId')
                      # normalize/strip line ids to ensure exact route_id matching
                      if lid is None:
                          continue
                      lid_s = str(lid).strip()
                      if lid_s:
                          line_ids.add(lid_s)
              agencies[aid] = line_ids

          if not agencies:
              print('No agencies / line IDs found in trafic.json to check.')
              sys.exit(0)

          errors = []
          total = len(agencies)
          idx = 0
          for aid, expected_line_ids in agencies.items():
              idx += 1
              if not expected_line_ids:
                  print(f'[{idx}/{total}] Agency "{aid}": no lineIds to check, skipping')
                  continue
              url = f'https://clarifygdps.com/bridge/gtfs/{aid}.zip'
              print(f'[{idx}/{total}] Checking GTFS for agency "{aid}" -> {url}')
              try:
                  resp = urllib.request.urlopen(url, timeout=30)
                  dataz = resp.read()
              except Exception as e:
                  msg = f'Failed to download {url}: {e}'
                  print('ERROR -', msg)
                  errors.append(msg)
                  continue
              try:
                  z = zipfile.ZipFile(io.BytesIO(dataz))
              except Exception as e:
                  msg = f'Invalid zip for {aid}: {e}'
                  print('ERROR -', msg)
                  errors.append(msg)
                  continue
              namelist = z.namelist()
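              # Find a routes.txt (may be in a subfolder)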
              rname = None
              for n in namelist:
                  if n.endswith('routes.txt'):
                      rname = n
                      break
              if not rname:
                  msg = f'No routes.txt in GTFS for {aid} (found files: {namelist[:10]})'
                  print('ERROR -', msg)
                  errors.append(msg)
                  continue
              try:
                  with z.open(rname) as rf:
                      txt = io.TextIOWrapper(rf, encoding='utf-8', errors='replace')
                      reader = csv.DictReader(txt)
                      # Remove BOM from header names if present
                      if reader.fieldnames:
                          reader.fieldnames = [fn.lstrip('\ufeff') for fn in reader.fieldnames]
                      routes = list(reader)
              except Exception as e:
                  msg = f'Failed to read routes.txt for {aid}: {e}'
                  print('ERROR -', msg)
                  errors.append(msg)
                  continue
              # Build set of route_id values (trimmed). Do NOT use route_short_name.
              route_ids = set()
              for r in routes:
                  if isinstance(r, dict):
                      rid = r.get('route_id')
                      if rid is None:
                          continue
                      rid_s = str(rid).strip()
                      if rid_s:
                          route_ids.add(rid_s)
              # Ensure expected IDs are trimmed strings as well for exact comparison
              expected_ids = set(str(x).strip() for x in expected_line_ids if x is not None and str(x).strip())
              missing = sorted([lid for lid in expected_ids if lid not in route_ids])
              if missing:
                  msg = f'{len(missing)} missing line_id(s) not found in routes.txt: {missing[:20]}'
                  print('ERROR -', msg)
                  errors.append(f'Agency {aid}: {msg}')
              else:
                  print(f'Agency {aid}: OK ({len(expected_line_ids)} line_ids matched)')

          if errors:
              print('\nGTFS verification errors:')
              for e in errors:
                  print(' -', e)
              sys.exit(1)
          else:
              print('\nAll GTFS checks passed.')
          PY
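      # Same download-and-compare logic as the previous step, but the expected
      # line IDs come from lines_picto.csv instead of trafic.json.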
      - name: Check GTFS routes match lines_picto
        run: |
          python - <<'PY'
          import csv, sys, io, urllib.request, zipfile
          from collections import defaultdict
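
          # Assumed CSV layout: ';'-delimited with at least 'agency_id' and
          # 'line_id' columns (plus the 'logoPath' column checked earlier).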
          # Read lines_picto.csv and group line_ids by agency_id
          try:
              f = open('lines_picto.csv', encoding='utf-8')
          except Exception as e:
              print('Failed to open lines_picto.csv:', e)
              sys.exit(1)
          reader = csv.DictReader(f, delimiter=';')
          # Remove BOM from header names if present (some editors add a UTF-8 BOM)
          if reader.fieldnames:
              reader.fieldnames = [fn.lstrip('\ufeff') for fn in reader.fieldnames]
          agencies = defaultdict(set)
          for row in reader:
              aid = row.get('agency_id')
              lid = row.get('line_id')
              if not aid or lid is None:
                  continue
              # normalize/strip line_id so we only compare route_id values
              lid_s = str(lid).strip()
              if lid_s:
                  agencies[aid].add(lid_s)
          f.close()

          print(f'Parsed {len(agencies)} agency(ies): {list(agencies.keys())[:50]}')
          errors = []
          total_agencies = len(agencies)
          count = 0
          for agency, line_ids in agencies.items():
              count += 1
              url = f'https://clarifygdps.com/bridge/gtfs/{agency}.zip'
              print(f'[{count}/{total_agencies}] Checking GTFS for agency "{agency}" -> {url}')
              try:
                  resp = urllib.request.urlopen(url, timeout=30)
                  data = resp.read()
              except Exception as e:
                  msg = f'Failed to download {url}: {e}'
                  print(f'Agency {agency}: ERROR - {msg}')
                  errors.append(msg)
                  continue
              try:
                  z = zipfile.ZipFile(io.BytesIO(data))
              except Exception as e:
                  msg = f'Invalid zip for {agency}: {e}'
                  print(f'Agency {agency}: ERROR - {msg}')
                  errors.append(msg)
                  continue
              namelist = z.namelist()
              # Find a routes.txt (may be in a subfolder)
              rname = None
              for n in namelist:
                  if n.endswith('routes.txt'):
                      rname = n
                      break
              if not rname:
                  msg = f'No routes.txt in GTFS for {agency} (found files: {namelist[:10]})'
                  print(f'Agency {agency}: ERROR - {msg}')
                  errors.append(msg)
                  continue
              try:
                  with z.open(rname) as rf:
                      txt = io.TextIOWrapper(rf, encoding='utf-8', errors='replace')
                      reader = csv.DictReader(txt)
                      # Remove BOM from header names if present
                      if reader.fieldnames:
                          reader.fieldnames = [fn.lstrip('\ufeff') for fn in reader.fieldnames]
                      routes = list(reader)
              except Exception as e:
                  msg = f'Failed to read routes.txt for {agency}: {e}'
                  print(f'Agency {agency}: ERROR - {msg}')
                  errors.append(msg)
                  continue
              # Build set of route_id values (trimmed). Do NOT use route_short_name.
              route_ids = set()
              for r in routes:
                  if isinstance(r, dict):
                      rid = r.get('route_id')
                      if rid is None:
                          continue
                      rid_s = str(rid).strip()
                      if rid_s:
                          route_ids.add(rid_s)
              # Ensure line IDs from CSV are trimmed for exact comparison
              csv_ids = set(str(x).strip() for x in line_ids if x is not None and str(x).strip())
              # Match only against route_id (do not match on route_short_name)
              missing = sorted([lid for lid in csv_ids if lid not in route_ids])
              if missing:
                  msg = f'{len(missing)} missing line_id(s) not found in routes.txt: {missing[:20]}'
                  print(f'Agency {agency}: ERROR - {msg}')
                  errors.append(f'Agency {agency}: {msg}')
              else:
                  print(f'Agency {agency}: OK ({len(line_ids)} line_ids matched)')

          # Summary and exit code
          if errors:
              print('\nGTFS verification errors:')
              for e in errors:
                  print(' -', e)
              sys.exit(1)
          else:
              print('\nAll GTFS checks passed.')
          PY
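
  # Deploy runs only for pushes to main, and only after both check jobs pass.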
  deploy:
    if: github.ref == 'refs/heads/main'
    needs: [syntax-check, gtfs-check]
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v6
      - name: Set up SSH key
        uses: webfactory/ssh-agent@v0.9.0
        with:
          ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
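      # Record the server's host key so the rsync step is not stopped by an
      # interactive host-verification prompt.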
      - name: Add server to known_hosts
        run: |
          ssh-keyscan 217.182.174.221 >> ~/.ssh/known_hosts
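      # rsync -a preserves permissions/times, -z compresses in transit, and
      # --delete removes server files that no longer exist in the repository.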
      - name: Copy files to server
        run: |
          rsync -avz --delete --exclude='.git*' ./ jouca@217.182.174.221:/var/www/html/hexatransit
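        # webfactory/ssh-agent already exports SSH_AUTH_SOCK to later steps;
        # it is passed through here explicitly.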
        env:
          SSH_AUTH_SOCK: ${{ env.SSH_AUTH_SOCK }}