diff --git a/.gitignore b/.gitignore
index 7723488..1f973c1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,3 +28,4 @@ test.py
 backups/*
 !backups/.gitkeep
 wizard.pyc
+__pycache__/
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..39fd6c5
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,7 @@
+FROM python:3.8
+COPY ./ /backup
+RUN chown -R backup:backup /backup
+WORKDIR /backup
+RUN pip install -r requirements.txt
+USER backup
+ENTRYPOINT ["python", "backup.py"]
diff --git a/README.md b/README.md
index bd24e5d..f6370e3 100644
--- a/README.md
+++ b/README.md
@@ -1,65 +1,205 @@
+# Jira Backup Python
+
 [![datree-badge](https://s3.amazonaws.com/catalog.static.datree.io/datree-badge-28px.svg)](https://datree.io/?src=badge)
-# Introduction
-Jira and Confluence are not (officially) supporting the option of creating automatic backups for their cloud instance.
-This project was created to provide a fully automated infrastructure for backing up Atlassian Cloud Jira or Confluence instances on a periodic basis.
-
-There are shell and bash scripts out there, which were created in order to download the backup file locally without the use of the "backup manager" UI,
-but most of them are not maintained and throwing errors. So, this project is aiming for full backup automation, and therefore this is the features road map:
-
-:white_check_mark: Create a script in python
-:white_check_mark: Support creating config.json from user input ('wizard')
-:white_check_mark: Download backup file locally
-:white_check_mark: Add an option to stream backup file to S3
-:white_check_mark: Check how to manually create a cron task on OS X / Linux
-:white_check_mark: Check how to manually create a schedule task on windows
-:black_square_button: Support adding cron / scheduled task from script
-
-# Installation
-## Prerequisite:
-:heavy_plus_sign: python 2.7.x or python 3.x.x
-:heavy_plus_sign: [virtualenv](https://pypi.org/project/virtualenv/) installed globally (pip install virtualenv)
-
-## Instructions:
-1. Create and start [virtual environment](https://python-guide-cn.readthedocs.io/en/latest/dev/virtualenvs.html) (in this example, the virtualenv will be called "venv")
-2. Install requirements
+[![Python 3.7+](https://img.shields.io/badge/python-3.7+-blue.svg)](https://www.python.org/downloads/)
+[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
+
+A Python-based backup tool for Atlassian Cloud Jira and Confluence instances with multi-cloud storage support and automated scheduling.
+
+## 🚀 Features
+
+- **Jira & Confluence Backups**: Create backups for both Jira and Confluence Cloud instances
+- **Multi-Cloud Support**: Stream backups directly to AWS S3, Google Cloud Storage, or Azure Blob Storage
+- **Local Download**: Option to download backup files locally
+- **Cross-Platform Scheduling**: Automatically create cron jobs (Linux/macOS) or scheduled tasks (Windows)
+- **Configuration Wizard**: Interactive setup for easy configuration
+- **API Token Authentication**: Secure authentication using Atlassian API tokens
+
+## 📋 Prerequisites
+
+- Python 3.7 or higher
+- Atlassian Cloud account (Jira and/or Confluence)
+- API token from [Atlassian](https://id.atlassian.com/manage/api-tokens)
+- (Optional) Cloud storage account: AWS, Google Cloud, or Azure
+
+## 🛠️ Installation
+
+1. **Clone the repository**
+   ```bash
+   git clone https://github.com/yourusername/jira-backup-py.git
+   cd jira-backup-py
+   ```
+
+2. **Create a virtual environment**
+   ```bash
+   python -m venv venv
+   source venv/bin/activate  # On Windows: venv\Scripts\activate
+   ```
+
+3. **Install dependencies**
+   ```bash
+   pip install -r requirements.txt
+   ```
+
+4. **Generate an API token**
+   - Go to [Atlassian API Tokens](https://id.atlassian.com/manage/api-tokens) and create a token
+
+5. **Configure the application**
+   - Create a `config.yaml` file with your settings (see the Configuration section below)
+   - Or run the configuration wizard: `python backup.py -w`
+
+## ⚙️ Configuration
+
+### Configuration File Setup
+
+Create a `config.yaml` file with your settings. The keys below match what `backup.py` reads; keep the `true` / `false` values quoted, since the script compares them as strings:
+
+```yaml
+---
+HOST_URL: "your-instance.atlassian.net"
+USER_EMAIL: "your.email@company.com"
+API_TOKEN: "your-api-token"
+INCLUDE_ATTACHMENTS: "false"
+DOWNLOAD_LOCALLY: "true"
+
+# AWS S3 Configuration (optional)
+UPLOAD_TO_S3:
+  AWS_ENDPOINT_URL: "https://s3.us-east-1.amazonaws.com"
+  AWS_REGION: "us-east-1"
+  S3_BUCKET: "my-backup-bucket"
+  S3_DIR: "Atlassian/"
+  AWS_ACCESS_KEY: "your-access-key"
+  AWS_SECRET_KEY: "your-secret-key"
+  AWS_IS_SECURE: True
+
+# Google Cloud Storage Configuration (optional)
+UPLOAD_TO_GCP:
+  GCP_PROJECT_ID: "my-project-id"
+  GCS_BUCKET: "my-backup-bucket"
+  GCS_DIR: "Atlassian/"
+  GCP_SERVICE_ACCOUNT_KEY: "/path/to/service-account-key.json"
+
+# Azure Blob Storage Configuration (optional)
+UPLOAD_TO_AZURE:
+  AZURE_ACCOUNT_NAME: "mystorageaccount"
+  AZURE_CONTAINER: "my-backup-container"
+  AZURE_DIR: "Atlassian/"
+  AZURE_CONNECTION_STRING: "DefaultEndpointsProtocol=https;AccountName=..."
+  # OR use AZURE_ACCOUNT_KEY instead of a connection string
+  # AZURE_ACCOUNT_KEY: "your-account-key"
```
-$(venv) pip install -r requirements.txt
-```
-3. Generate an API token at https://id.atlassian.com/manage/api-tokens
-![Screenshot](https://github.com/datreeio/jira-backup-py/blob/master/screenshots/atlassian-api-token.png)
-4. Fill the details at the [config.yaml file](https://github.com/datreeio/jira-backup-py/blob/master/config.json) or run the backup.py script with '-w' flag
-5. Run backup.py script with the flag '-j' to backup Jira or '-c' to backup Confluence
+
+### Configuration Wizard
+
+For interactive setup, run:
+```bash
+python backup.py -w
```
-$(venv) python backup.py
-```
-![Screenshot](https://github.com/datreeio/jira-backup-py/blob/master/screenshots/terminal.png)
-
-## What's next?
-It depends on your needs. I, for example, use this script together with [serverless](https://serverless.com/) to create a periodic [AWS lambda](https://aws.amazon.com/lambda/) which triggered every 4 days, creating a backup and upload it directly to S3.
-
-There is a more "stupid" option to get the same result - by creating a cron / scheduled task on your local machine:
-* **OS X / Linux:** set a cron task with crontab
-```
-echo "* * * * * cd %script dir% && %activate virtualenv% && python backup.py > %log name% 2>&1" | crontab -
-```
-Example for adding a cron task which will run every 4 days, at 10:00
+
+This will guide you through setting up basic Jira credentials and S3 configuration.
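+
+A sample session looks like the following; the prompts come from `wizard.py`, and the values shown are illustrative:
+
+```text
+$ python backup.py -w
+What is your Jira host name? your-instance.atlassian.net
+What is your Jira account email address? your.email@company.com
+Paste your Jira API token: <token>
+Do you want to include attachments? (true / false) false
+Do you want to download the backup file locally? (true / false) true
+Do you want to upload the backup file to S3? (true / false) false
+```
+
+The answers are written to `config.yaml` next to the script, overwriting any existing file.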
+
+## 🚀 Usage
+
+### Manual Backup
+
+```bash
+# Back up Jira (default)
+python backup.py -j
+
+# Back up Confluence
+python backup.py -c
+
+# Run configuration wizard
+python backup.py -w
```
-echo "0 10 */4 * * cd ~/Dev/jira-backup-py && source venv/bin/activate && python backup.py > backup_script.log 2>&1" | crontab -
-```
-
-* **Windows:** set a scheduled task with task scheduler
-```
-schtasks /create /tn "%task name%" /sc DAILY /mo %number of days% /tr "%full path to win_task_wrapper.bat%" /st %start time%
-```
-Example for adding a scheduled task which will run every 4 days, at 10:00
-```
-schtasks /create /tn "jira-backup" /sc DAILY /mo 4 /tr "C:\jira-backup-py\win_task_wrapper.bat" /st 10:00
-```
-# Changelog:
-* 04 SEP 2020 - Support Confluence backup
-* 16 JAN 2019 - Updated script to work w/ [API token](https://confluence.atlassian.com/cloud/api-tokens-938839638.html), instead personal Jira user name and password
-
-# Resources:
-:heavy_plus_sign: [JIRA support - How to Automate Backups for JIRA Cloud applications](https://confluence.atlassian.com/jirakb/how-to-automate-backups-for-jira-cloud-applications-779160659.html)
-:heavy_plus_sign: [Atlassian Labs' automatic-cloud-backup script](https://bitbucket.org/atlassianlabs/automatic-cloud-backup/src/d43ca5f33192e78b2e1869ab7c708bb32bfd7197/backup.ps1?at=master&fileviewer=file-view-default)
-:heavy_plus_sign: [A more maintainable version of Atlassian Labs' script](https://github.com/mattock/automatic-cloud-backup)
+
+### Automated Scheduling
+
+Set up scheduled backups using system schedulers:
+
+```bash
+# Set up an automated Jira backup every 4 days at 10:00 AM (default)
+python backup.py -s
+
+# Set up an automated Confluence backup every 7 days at 2:30 PM
+python backup.py -s --schedule-days 7 --schedule-time 14:30 --schedule-service confluence
+
+# Set up an automated Jira backup every 2 days at 6:00 AM
+python backup.py -s --schedule-days 2 --schedule-time 06:00 --schedule-service jira
+```
+
+This will create:
+- **Linux/macOS**: a cron job in your crontab
+- **Windows**: a scheduled task in Task Scheduler
+
+### Command Line Options
+
+| Option | Description |
+|--------|-------------|
+| `-j, --jira` | Back up Jira (default if no service specified) |
+| `-c, --confluence` | Back up Confluence |
+| `-w, --wizard` | Run configuration wizard |
+| `-s, --schedule` | Set up automated scheduled backup |
+| `--schedule-days` | Frequency in days for scheduled backup (default: 4) |
+| `--schedule-time` | Time for scheduled backup in HH:MM format (default: 10:00) |
+| `--schedule-service` | Service for scheduled backup (jira/confluence, default: jira) |
+
+## 🔧 Advanced Configuration
+
+### Minimal Configuration
+
+If you only want to download backups locally without cloud storage:
+
+```yaml
+---
+HOST_URL: "your-instance.atlassian.net"
+USER_EMAIL: "your.email@company.com"
+API_TOKEN: "your-api-token"
+INCLUDE_ATTACHMENTS: "false"
+DOWNLOAD_LOCALLY: "true"
+```
+
+Simply omit the `UPLOAD_TO_XXX` sections you don't need.
+
+### Multiple Cloud Providers
+
+You can configure multiple cloud storage providers simultaneously; the script will upload to every configured destination:
+
+```yaml
+UPLOAD_TO_S3:
+  S3_BUCKET: "my-s3-bucket"
+  # ... S3 config
+
+UPLOAD_TO_GCP:
+  GCS_BUCKET: "my-gcs-bucket"
+  # ... GCP config
+
+UPLOAD_TO_AZURE:
+  AZURE_CONTAINER: "my-azure-container"
+  # ... Azure config
+```
+
+## 🤝 Contributing
+
+Contributions are welcome! Please feel free to submit issues and pull requests.
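+
+## 🐳 Docker
+
+The repository also ships a `Dockerfile` (Python 3.8 base image, entrypoint `python backup.py`). A minimal sketch of building and running it follows; the image tag is illustrative, and `config.yaml` is mounted into the container's `/backup` working directory:
+
+```bash
+docker build -t jira-backup-py .
+docker run --rm -v "$(pwd)/config.yaml:/backup/config.yaml:ro" jira-backup-py -j
+```
+
+If `DOWNLOAD_LOCALLY` is enabled, also mount a volume for the `backups/` folder so the downloaded archive outlives the container.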
+
+## 📝 Changelog
+
+- **2025-06-24**: Added separate cron schedules for Jira and Confluence backups
+- **2025-06-24**: Made cloud storage configuration sections optional
+- **2025-06-24**: Added automated scheduling support for backup tasks
+- **2025-06-23**: Added Google Cloud Storage and Azure Blob Storage support
+- **2020-09-04**: Added Confluence backup support
+- **2019-01-16**: Updated to use API tokens instead of passwords
+
+## 📜 License
+
+This project is licensed under the MIT License; see the [LICENSE](LICENSE) file for details.
+
+## 🙏 Acknowledgments
+
+- Original concept inspired by [Atlassian Labs' automatic-cloud-backup](https://bitbucket.org/atlassianlabs/automatic-cloud-backup/)
+- Thanks to all contributors who have helped improve this project
+
+## 📞 Support
+
+- **Issues**: [GitHub Issues](https://github.com/yourusername/jira-backup-py/issues)
+
+---
+
+**Note**: This tool is not officially supported by Atlassian. Use at your own risk and always verify your backups are working correctly.
\ No newline at end of file
diff --git a/__init__.py b/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/backup.py b/backup.py
index a43add1..a61e6ee 100644
--- a/backup.py
+++ b/backup.py
@@ -4,10 +4,14 @@
 import os
 import argparse
 import requests
-import boto
-from boto.s3.key import Key
+import boto3
+from boto3.s3.transfer import TransferConfig
+from google.cloud import storage
+from azure.storage.blob import BlobServiceClient
 import wizard
-from time import gmtime, strftime
+import platform
+import subprocess
+import sys


 def read_config():
@@ -38,8 +42,8 @@ def create_confluence_backup(self):
         time.sleep(self.wait)
         while 'fileName' not in self.backup_status.keys():
             self.backup_status = json.loads(self.session.get(confluence_backup_status).text)
-            print('Current status: {progress}; {description}'.format( 
-                progress=self.backup_status['alternativePercentage'], 
+            print('Current status: {progress}; {description}'.format(
+                progress=self.backup_status['alternativePercentage'],
                 description=self.backup_status['currentStatus']))
             time.sleep(self.wait)
         return 'https://{url}/wiki/download/{file_name}'.format(
@@ -58,8 +62,8 @@ def create_jira_backup(self):
         while 'result' not in self.backup_status.keys():
             self.backup_status = json.loads(self.session.get(jira_backup_status).text)
             print('Current status: {status} {progress}; {description}'.format(
-                status=self.backup_status['status'], 
-                progress=self.backup_status['progress'], 
+                status=self.backup_status['status'],
+                progress=self.backup_status['progress'],
                 description=self.backup_status['description']))
             time.sleep(self.wait)
         return '{prefix}/{result_id}'.format(
@@ -79,31 +83,230 @@ def stream_to_s3(self, url, remote_filename):
         print('-> Streaming to S3')

         if self.config['UPLOAD_TO_S3']['AWS_ACCESS_KEY'] == '':
-            connect = boto.connect_s3()
+            s3_client = boto3.client('s3')
         else:
-            connect = boto.connect_s3(
-                aws_access_key_id=self.config['UPLOAD_TO_S3']['AWS_ACCESS_KEY'],
-                aws_secret_access_key=self.config['UPLOAD_TO_S3']['AWS_SECRET_KEY']
-            )
+            s3_client = boto3.client(
+                's3',
+                aws_access_key_id=self.config['UPLOAD_TO_S3']['AWS_ACCESS_KEY'],
+                aws_secret_access_key=self.config['UPLOAD_TO_S3']['AWS_SECRET_KEY'],
+                region_name=self.config['UPLOAD_TO_S3']['AWS_REGION'] or None,  # '' falls back to the default region
+                endpoint_url=self.config['UPLOAD_TO_S3']['AWS_ENDPOINT_URL'] or None,  # '' falls back to the default endpoint
+                use_ssl=self.config['UPLOAD_TO_S3']['AWS_IS_SECURE']
+            )

-        bucket = connect.get_bucket(self.config['UPLOAD_TO_S3']['S3_BUCKET'])
+        bucket_name = self.config['UPLOAD_TO_S3']['S3_BUCKET']
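+        # The response below is streamed and r.raw is handed to boto3 as a
+        # file object; the TransferConfig forces a single-part, single-threaded
+        # upload so the non-seekable stream is read strictly in order.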
r = self.session.get(url, stream=True) if r.status_code == 200: - k = Key(bucket) - k.key = remote_filename - k.content_type = r.headers['content-type'] - k.set_contents_from_string(r.content) - return + key = "{s3_bucket}{s3_filename}".format( + s3_bucket=self.config['UPLOAD_TO_S3']['S3_DIR'], + s3_filename=remote_filename + ) + content_length = int(r.headers.get('Content-Length', 0)) + + config = TransferConfig( + multipart_threshold=content_length + 1, + max_concurrency=1, + use_threads=False + ) + + s3_client.upload_fileobj( + Fileobj=r.raw, + Bucket=bucket_name, + Key=key, + ExtraArgs={'ContentType': r.headers['content-type']}, + Config=config + ) + + def stream_to_gcs(self, url, remote_filename): + print('-> Streaming to GCS') + + if self.config['UPLOAD_TO_GCP']['GCP_SERVICE_ACCOUNT_KEY']: + client = storage.Client.from_service_account_json( + self.config['UPLOAD_TO_GCP']['GCP_SERVICE_ACCOUNT_KEY'], + project=self.config['UPLOAD_TO_GCP']['GCP_PROJECT_ID'] + ) + else: + client = storage.Client(project=self.config['UPLOAD_TO_GCP']['GCP_PROJECT_ID']) + + bucket_name = self.config['UPLOAD_TO_GCP']['GCS_BUCKET'] + bucket = client.bucket(bucket_name) + + r = self.session.get(url, stream=True) + if r.status_code == 200: + blob_name = "{gcs_dir}{filename}".format( + gcs_dir=self.config['UPLOAD_TO_GCP']['GCS_DIR'], + filename=remote_filename + ) + + blob = bucket.blob(blob_name) + blob.content_type = r.headers.get('content-type', 'application/zip') + + blob.upload_from_file(r.raw, content_type=blob.content_type) + + def stream_to_azure(self, url, remote_filename): + print('-> Streaming to Azure Blob Storage') + + if self.config['UPLOAD_TO_AZURE']['AZURE_CONNECTION_STRING']: + blob_service_client = BlobServiceClient.from_connection_string( + self.config['UPLOAD_TO_AZURE']['AZURE_CONNECTION_STRING'] + ) + else: + account_url = f"https://{self.config['UPLOAD_TO_AZURE']['AZURE_ACCOUNT_NAME']}.blob.core.windows.net" + blob_service_client = BlobServiceClient( + account_url=account_url, + credential=self.config['UPLOAD_TO_AZURE']['AZURE_ACCOUNT_KEY'] + ) + + container_name = self.config['UPLOAD_TO_AZURE']['AZURE_CONTAINER'] + + r = self.session.get(url, stream=True) + if r.status_code == 200: + blob_name = "{azure_dir}{filename}".format( + azure_dir=self.config['UPLOAD_TO_AZURE']['AZURE_DIR'], + filename=remote_filename + ) + + blob_client = blob_service_client.get_blob_client( + container=container_name, + blob=blob_name + ) + + blob_client.upload_blob( + r.raw, + content_type=r.headers.get('content-type', 'application/zip'), + overwrite=True + ) + + +def setup_scheduled_task(frequency_days=4, time_hour=10, time_minute=0, service_type='jira'): + script_path = os.path.abspath(__file__) + script_dir = os.path.dirname(script_path) + + system = platform.system().lower() + + if system in ['linux', 'darwin']: + return setup_cron_task(script_path, script_dir, frequency_days, time_hour, time_minute, service_type) + elif system == 'windows': + return setup_windows_task(script_path, script_dir, frequency_days, time_hour, time_minute, service_type) + else: + raise Exception(f"Unsupported operating system: {system}") + + +def setup_cron_task(script_path, script_dir, frequency_days, time_hour, time_minute, service_type): + python_path = sys.executable + service_flag = '-j' if service_type == 'jira' else '-c' + + cron_command = f"{time_minute} {time_hour} */{frequency_days} * * cd {script_dir} && {python_path} {script_path} {service_flag}" + + try: + result = subprocess.run(['crontab', '-l'], 
capture_output=True, text=True) + existing_cron = result.stdout if result.returncode == 0 else "" + + # Remove only the cron entry for the same service type + lines = existing_cron.strip().split('\n') if existing_cron.strip() else [] + updated_lines = [] + skip_next = False + + for i, line in enumerate(lines): + if skip_next: + skip_next = False + continue + + # Check if this is a comment line for jira-backup-py + if 'jira-backup-py automated backup' in line and f'({service_type})' in line: + # Check if the next line contains the cron command for this service + if i + 1 < len(lines) and service_flag in lines[i + 1]: + skip_next = True # Skip both the comment and the command + print(f"-> Updating existing {service_type} backup schedule...") + continue + + updated_lines.append(line) + + existing_cron = '\n'.join(updated_lines) + '\n' if updated_lines else "" + new_cron = existing_cron + f"# jira-backup-py automated backup ({service_type})\n{cron_command}\n" + + process = subprocess.Popen(['crontab', '-'], stdin=subprocess.PIPE, text=True) + process.communicate(input=new_cron) + + if process.returncode == 0: + print(f"-> Successfully scheduled {service_type} backup to run every {frequency_days} days at {time_hour:02d}:{time_minute:02d}") + return True + else: + print("-> Failed to create cron job") + return False + + except Exception as e: + print(f"-> Error setting up cron job: {e}") + return False + + +def setup_windows_task(script_path, script_dir, frequency_days, time_hour, time_minute, service_type): + python_path = sys.executable + service_flag = '-j' if service_type == 'jira' else '-c' + task_name = f"jira-backup-py-{service_type}" + + cmd = [ + 'schtasks', '/create', + '/tn', task_name, + '/sc', 'DAILY', + '/mo', str(frequency_days), + '/tr', f'"{python_path}" "{script_path}" {service_flag}', + '/st', f'{time_hour:02d}:{time_minute:02d}', + '/f' + ] + + try: + result = subprocess.run(cmd, capture_output=True, text=True) + if result.returncode == 0: + print(f"-> Successfully scheduled {service_type} backup to run every {frequency_days} days at {time_hour:02d}:{time_minute:02d}") + return True + else: + print(f"-> Failed to create scheduled task: {result.stderr}") + return False + except Exception as e: + print(f"-> Error setting up scheduled task: {e}") + return False if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-w', action='store_true', dest='wizard', help='activate config wizard') parser.add_argument('-c', action='store_true', dest='confluence', help='activate confluence backup') parser.add_argument('-j', action='store_true', dest='jira', help='activate jira backup') - # print('debug command-line: {}'.format(parser.parse_args())) - if parser.parse_args().wizard: + parser.add_argument('-s', '--schedule', action='store_true', dest='schedule', help='setup automated scheduled backup') + parser.add_argument('--schedule-days', type=int, default=4, help='frequency in days for scheduled backup (default: 4)') + parser.add_argument('--schedule-time', type=str, default='10:00', help='time for scheduled backup in HH:MM format (default: 10:00)') + parser.add_argument('--schedule-service', type=str, choices=['jira', 'confluence'], default='jira', help='service type for scheduled backup (default: jira)') + args = parser.parse_args() + # print('debug command-line: {}'.format(args)) + + if args.wizard: wizard.create_config() + + if args.schedule: + try: + time_parts = args.schedule_time.split(':') + hour = int(time_parts[0]) + minute = int(time_parts[1]) if 
len(time_parts) > 1 else 0
+
+            if not (0 <= hour <= 23) or not (0 <= minute <= 59):
+                raise ValueError("Invalid time format")
+
+            if setup_scheduled_task(
+                frequency_days=args.schedule_days,
+                time_hour=hour,
+                time_minute=minute,
+                service_type=args.schedule_service
+            ):
+                print("-> Scheduled task setup completed")
+                sys.exit(0)
+            sys.exit(1)
+        except ValueError:
+            print("-> Error: Invalid time format. Use HH:MM format (e.g., 10:30)")
+            sys.exit(1)
+        except Exception as e:
+            print(f"-> Error setting up scheduled task: {e}")
+            sys.exit(1)
+
     config = read_config()

     if config['HOST_URL'] == 'something.atlassian.net':
@@ -111,7 +314,7 @@ def stream_to_s3(self, url, remote_filename):
     print('-> Starting backup; include attachments: {}'.format(config['INCLUDE_ATTACHMENTS']))

     atlass = Atlassian(config)
-    if parser.parse_args().confluence: backup_url = atlass.create_confluence_backup()
+    if args.confluence: backup_url = atlass.create_confluence_backup()
     else: backup_url = atlass.create_jira_backup()

     print('-> Backup URL: {}'.format(backup_url))
@@ -121,5 +324,11 @@ def stream_to_s3(self, url, remote_filename):
     if config['DOWNLOAD_LOCALLY'] == 'true':
         atlass.download_file(backup_url, file_name)

-    if config['UPLOAD_TO_S3']['S3_BUCKET'] != '':
-        atlass.stream_to_s3(backup_url, file_name)
\ No newline at end of file
+    if 'UPLOAD_TO_S3' in config and config['UPLOAD_TO_S3'].get('S3_BUCKET', '') != '':
+        atlass.stream_to_s3(backup_url, file_name)
+
+    if 'UPLOAD_TO_GCP' in config and config['UPLOAD_TO_GCP'].get('GCS_BUCKET', '') != '':
+        atlass.stream_to_gcs(backup_url, file_name)
+
+    if 'UPLOAD_TO_AZURE' in config and config['UPLOAD_TO_AZURE'].get('AZURE_CONTAINER', '') != '':
+        atlass.stream_to_azure(backup_url, file_name)
\ No newline at end of file
diff --git a/config.yaml b/config.yaml
index 961fa7a..97465c0 100644
--- a/config.yaml
+++ b/config.yaml
@@ -4,7 +4,22 @@ USER_EMAIL: "email address for the Atlassian account you're using to create the
 API_TOKEN: "token ID generated at https://id.atlassian.com/manage/api-tokens"
 INCLUDE_ATTACHMENTS: "include attachments? this will make the backup size bigger - true / false"
 DOWNLOAD_LOCALLY: "download the backup file to backups folder? true / false"
-UPLOAD_TO_S3:
-  S3_BUCKET: "S3 bucket name (empty value will skip this step)"
+UPLOAD_TO_S3:
+  AWS_ENDPOINT_URL: "amazon S3 endpoints https://docs.aws.amazon.com/general/latest/gr/s3.html"
+  AWS_REGION: "amazon S3 region"
+  S3_BUCKET: "S3 bucket name (empty value will skip this step)"
+  S3_DIR: "S3 directory for upload (example Atlassian/)"
   AWS_ACCESS_KEY: "not mandatory if already set on the machine with AWS CLI"
-  AWS_SECRET_KEY: "not mandatory if already set on the machine with AWS CLI"
\ No newline at end of file
+  AWS_SECRET_KEY: "not mandatory if already set on the machine with AWS CLI"
+  AWS_IS_SECURE: True
+UPLOAD_TO_GCP:
+  GCP_PROJECT_ID: "GCP project ID"
+  GCS_BUCKET: "GCS bucket name (empty value will skip this step)"
+  GCS_DIR: "GCS directory for upload (example Atlassian/)"
+  GCP_SERVICE_ACCOUNT_KEY: "path to service account key file (optional if using default credentials)"
+UPLOAD_TO_AZURE:
+  AZURE_ACCOUNT_NAME: "Azure storage account name"
+  AZURE_CONTAINER: "Azure container name (empty value will skip this step)"
+  AZURE_DIR: "Azure directory for upload (example Atlassian/)"
+  AZURE_CONNECTION_STRING: "Azure storage connection string (optional if using account key)"
+  AZURE_ACCOUNT_KEY: "Azure storage account key (optional if using connection string)"
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index cbe0492..53f68ab 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,5 @@
-boto==2.48.0
-certifi==2018.4.16
-chardet==3.0.4
-idna==2.6
-requests==2.20.0
-urllib3==1.24.2
-PyYAML==5.3.1
\ No newline at end of file
+boto3==1.35.26
+google-cloud-storage==2.18.0
+azure-storage-blob==12.22.0
+PyYAML==6.0.2
+requests==2.32.3
diff --git a/wizard.py b/wizard.py
index 5b4780d..f0eb6e4 100644
--- a/wizard.py
+++ b/wizard.py
@@ -1,31 +1,45 @@
 import os
-import json
+import yaml


 def create_config():
-    jira_host = raw_input("What is your Jira host name? ")
-    user = raw_input("What is your Jira account email address? ")
-    password = raw_input("Paste your Jira API token: ")
-    attachments = raw_input("Do you want to include attachments? (true / false) ")
-    download_locally = raw_input("Do you want to download the backup file locally? (true / false) ")
+    jira_host = input("What is your Jira host name? ")
+    user = input("What is your Jira account email address? ")
+    password = input("Paste your Jira API token: ")
+    attachments = input("Do you want to include attachments? (true / false) ")
+    download_locally = input("Do you want to download the backup file locally? (true / false) ")
+
     custom_config = {
-        'JIRA_HOST': jira_host,
-        'INCLUDE_ATTACHMENTS': attachments.lower(),
-        'JIRA_EMAIL': user,
+        'HOST_URL': jira_host,
+        'USER_EMAIL': user,
         'API_TOKEN': password,
+        'INCLUDE_ATTACHMENTS': attachments.lower(),
         'DOWNLOAD_LOCALLY': download_locally.lower(),
         'UPLOAD_TO_S3': {
+            'AWS_ENDPOINT_URL': "",
+            'AWS_REGION': "",
             'S3_BUCKET': "",
+            'S3_DIR': "",
             'AWS_ACCESS_KEY': "",
-            'AWS_SECRET_KEY': ""
+            'AWS_SECRET_KEY': "",
+            'AWS_IS_SECURE': True
         }
     }
-    upload_backup = raw_input("Do you want to upload the backup file to S3? (true / false) ")
+
+    upload_backup = input("Do you want to upload the backup file to S3? (true / false) ")
     if upload_backup.lower() == 'true':
-        custom_config['UPLOAD_TO_S3']['S3_BUCKET'] = raw_input("What is the S3 bucket name? ")
-        custom_config['UPLOAD_TO_S3']['AWS_ACCESS_KEY'] = raw_input("What is your AWS access key? ")
-        custom_config['UPLOAD_TO_S3']['AWS_SECRET_KEY'] = raw_input("What is your AWS secret key? ")
") + custom_config['UPLOAD_TO_S3']['AWS_ENDPOINT_URL'] = input("What is your AWS endpoint url? ") + custom_config['UPLOAD_TO_S3']['AWS_REGION'] = input("What is your AWS region? ") + custom_config['UPLOAD_TO_S3']['S3_BUCKET'] = input("What is the S3 bucket name? ") + custom_config['UPLOAD_TO_S3']['S3_DIR'] = input("What is the S3 directory for upload? (example Atlassian/) ") + custom_config['UPLOAD_TO_S3']['AWS_ACCESS_KEY'] = input("What is your AWS access key? ") + custom_config['UPLOAD_TO_S3']['AWS_SECRET_KEY'] = input("What is your AWS secret key? ") + custom_config['UPLOAD_TO_S3']['AWS_IS_SECURE'] = input("Do you want to use SSL? (true / false) ") - config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.json') + config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.yaml') with open(config_path, 'w+') as config_file: - json.dump(custom_config, config_file) + yaml.dump(custom_config, config_file, default_flow_style=False) + + +if __name__ == "__main__": + create_config()