diff --git a/.github/workflows/azure_integration_test.yaml b/.github/workflows/azure_integration_test.yaml new file mode 100644 index 000000000..1b39d4dfb --- /dev/null +++ b/.github/workflows/azure_integration_test.yaml @@ -0,0 +1,65 @@ +name: Azure Integration Tests + +on: + push: + branches: + - main # Trigger the workflow when code is pushed to the main branch + pull_request: + branches: + - main # Trigger the workflow when the PR targets the main branch + workflow_dispatch: # Allows manual triggering of the workflow + +env: + GOOS: linux + GO111MODULE: on + +jobs: + test-azure: + name: Run Azure Integration Tests + runs-on: ubuntu-24.04 + env: + AZURE_APP_ID: ${{ secrets.AZURE_APP_ID }} + AZURE_PASSWORD: ${{ secrets.AZURE_PASSWORD }} + AZURE_TENANT: ${{ secrets.AZURE_TENANT }} + + steps: + - name: Check if environment variables are set # Validate secrets are passed + run: | + if [[ -z "$AZURE_APP_ID" ]]; then + echo "AZURE_APP_ID is not set. Please check if secrets.AZURE_APP_ID is in the repository." + exit 1 + fi + if [[ -z "$AZURE_PASSWORD" ]]; then + echo "AZURE_PASSWORD is not set. Please check if secrets.AZURE_PASSWORD is in the repository." + exit 1 + fi + if [[ -z "$AZURE_TENANT" ]]; then + echo "AZURE_TENANT is not set. Please check if secrets.AZURE_TENANT is in the repository." 
+ exit 1 + fi + + - name: Checkout GitHub Repository + uses: actions/checkout@v4 + with: + lfs: true + + - name: Install Azure CLI + run: | + curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash + az --version + + - name: Install Golang + uses: actions/setup-go@v5 + with: + go-version: 1.22 + + - name: Set up Python 3.10 + uses: actions/setup-python@v4 + with: + python-version: '3.10' + + - name: Azure CLI Login Using Service Principal + run: az login --service-principal --username "$AZURE_APP_ID" --password "$AZURE_PASSWORD" --tenant "$AZURE_TENANT" + + - name: Run Azure-related integration test + run: go test -v ./pkg/driver/deployment/azure_functions_test.go diff --git a/.github/workflows/e2e_azure.yaml b/.github/workflows/e2e_azure.yaml new file mode 100644 index 000000000..59082d6e6 --- /dev/null +++ b/.github/workflows/e2e_azure.yaml @@ -0,0 +1,68 @@ +name: End-to-End Azure Functions Tests + +on: + push: + branches: + - main # Trigger the workflow when code is pushed to the main branch + pull_request: + branches: + - main # Trigger the workflow when the PR targets the main branch + workflow_dispatch: # Allows manual triggering of the workflow + +env: + GOOS: linux + GO111MODULE: on + +jobs: + test-azure: + name: Test E2E Azure Functions Cloud Deployment + runs-on: ubuntu-24.04 + env: + AZURE_APP_ID: ${{ secrets.AZURE_APP_ID }} + AZURE_PASSWORD: ${{ secrets.AZURE_PASSWORD }} + AZURE_TENANT: ${{ secrets.AZURE_TENANT }} + + steps: + - name: Check if environment variables are set # Validate secrets are passed + run: | + if [[ -z "$AZURE_APP_ID" ]]; then + echo "AZURE_APP_ID is not set. Please check if secrets.AZURE_APP_ID is in the repository." + exit 1 + fi + if [[ -z "$AZURE_PASSWORD" ]]; then + echo "AZURE_PASSWORD is not set. Please check if secrets.AZURE_PASSWORD is in the repository." + exit 1 + fi + if [[ -z "$AZURE_TENANT" ]]; then + echo "AZURE_TENANT is not set. Please check if secrets.AZURE_TENANT is in the repository." 
+ exit 1 + fi + + - name: Checkout GitHub Repository + uses: actions/checkout@v4 + with: + lfs: true + + - name: Install Azure CLI + run: | + curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash + az --version + + - name: Install Golang + uses: actions/setup-go@v5 + with: + go-version: 1.22 + + - name: Set up Python 3.10 + uses: actions/setup-python@v4 + with: + python-version: '3.10' + + - name: Azure CLI Login Using Service Principal + run: az login --service-principal --username "$AZURE_APP_ID" --password "$AZURE_PASSWORD" --tenant "$AZURE_TENANT" + + - name: Build and Run Loader + run: go run cmd/loader.go --config cmd/config_azure_trace.json + + - name: Check the output + run: test -f "data/out/experiment_duration_5.csv" && test $(grep true data/out/experiment_duration_5.csv | wc -l) -eq 0 # test the output file for errors (true means failure to invoke) diff --git a/.gitignore b/.gitignore index 8bfb932c0..0bb1ca3ac 100644 --- a/.gitignore +++ b/.gitignore @@ -211,3 +211,9 @@ tools/plotter/test-out data/traces/azure_* data/traces/day* + +# Azure deployment +azure_functions_for_zip/ +azurefunctions_setup/shared_azure_workload/exec_func.py +azure_functions_for_zip_test/ +function*.zip diff --git a/azurefunctions_setup/azurefunctionsconfig.yaml b/azurefunctions_setup/azurefunctionsconfig.yaml new file mode 100644 index 000000000..e2d38a560 --- /dev/null +++ b/azurefunctions_setup/azurefunctionsconfig.yaml @@ -0,0 +1,6 @@ +# azurefunctionsconfig.yaml +azurefunctionsconfig: + resource_group: invitro-rg # Name of the resource group + storage_account_name: invitrostorage # Name of the storage account + function_app_name: invitro-functionapp # Name of the function app + location: EastUS # Region where resource created diff --git a/azurefunctions_setup/host.json b/azurefunctions_setup/host.json new file mode 100644 index 000000000..cc38758bf --- /dev/null +++ b/azurefunctions_setup/host.json @@ -0,0 +1,16 @@ +{ + "version": "2.0", + "logging": { + 
"applicationInsights": { + "samplingSettings": { + "isEnabled": true, + "excludedTypes": "Request" + } + } + }, + "extensionBundle": { + "id": "Microsoft.Azure.Functions.ExtensionBundle", + "version": "[4.*, 5.0.0)" + } + +} diff --git a/azurefunctions_setup/local.settings.json b/azurefunctions_setup/local.settings.json new file mode 100644 index 000000000..19ed409f0 --- /dev/null +++ b/azurefunctions_setup/local.settings.json @@ -0,0 +1,8 @@ +{ + "IsEncrypted": false, + "Values": { + "FUNCTIONS_WORKER_RUNTIME": "python", + "AzureWebJobsFeatureFlags": "EnableWorkerIndexing", + "AzureWebJobsStorage": "" + } +} \ No newline at end of file diff --git a/azurefunctions_setup/requirements.txt b/azurefunctions_setup/requirements.txt new file mode 100644 index 000000000..a69ceeaef --- /dev/null +++ b/azurefunctions_setup/requirements.txt @@ -0,0 +1,3 @@ +azure-functions +numpy>=1.21,<1.26 +psutil>=5.9,<6.0 diff --git a/azurefunctions_setup/shared_azure_workload/azurefunctionsworkload.py b/azurefunctions_setup/shared_azure_workload/azurefunctionsworkload.py new file mode 100644 index 000000000..d726cd0a8 --- /dev/null +++ b/azurefunctions_setup/shared_azure_workload/azurefunctionsworkload.py @@ -0,0 +1,56 @@ +import time +import socket +import json +import azure.functions as func +import logging + +# Note: exec_func.py is not stored here permanently. It is copied from server/trace-func-py during setup. 
+from .exec_func import execute_function + +# Global variable for hostname +hostname = socket.gethostname() + +def main(req: func.HttpRequest) -> func.HttpResponse: + logging.info("Processing request.") + + start_time = time.time() + + # Parse JSON request body + try: + req_body = req.get_json() + logging.info(f"Request body: {req_body}") + except ValueError: + logging.error("Invalid JSON received.") + return func.HttpResponse( + json.dumps({"error": "Invalid JSON"}), + status_code=400, + mimetype="application/json" + ) + + runtime_milliseconds = req_body.get('RuntimeInMilliSec', 1000) + memory_mebibytes = req_body.get('MemoryInMebiBytes', 128) + + logging.info(f"Runtime requested: {runtime_milliseconds} ms, Memory: {memory_mebibytes} MiB") + + # Call the execute_function, which needs to be copied from server/trace-func-py/exec_func.py + duration = execute_function("",runtime_milliseconds,memory_mebibytes) + result_msg = f"Workload completed in {duration} microseconds" + + # Prepare the response + response = { + "Status": "Success", + "Function": req.url.split("/")[-1], + "MachineName": hostname, + "ExecutionTime": int((time.time() - start_time) * 1_000_000), # Total time (includes HTTP, workload, and response prep) + "DurationInMicroSec": duration, # Time spent on the workload itself + "MemoryUsageInKb": memory_mebibytes * 1024, + "Message": result_msg + } + + logging.info(f"Response: {response}") + + return func.HttpResponse( + json.dumps(response), + status_code=200, + mimetype="application/json" + ) diff --git a/azurefunctions_setup/shared_azure_workload/function.json b/azurefunctions_setup/shared_azure_workload/function.json new file mode 100644 index 000000000..e2945d59e --- /dev/null +++ b/azurefunctions_setup/shared_azure_workload/function.json @@ -0,0 +1,18 @@ +{ + "bindings": [ + { + "authLevel": "anonymous", + "type": "httpTrigger", + "direction": "in", + "name": "req", + "methods": ["post"] + }, + { + "type": "http", + "direction": "out", + "name": 
"$return" + } + ], + "scriptFile": "azurefunctionsworkload.py" +} + \ No newline at end of file diff --git a/cmd/config_azure_trace.json b/cmd/config_azure_trace.json new file mode 100644 index 000000000..002b95df1 --- /dev/null +++ b/cmd/config_azure_trace.json @@ -0,0 +1,28 @@ +{ + "Seed": 42, + + "Platform": "AzureFunctions", + "InvokeProtocol" : "http1", + "EndpointPort": 80, + + "BusyLoopOnSandboxStartup": false, + + "TracePath": "data/traces/example", + "Granularity": "minute", + "OutputPathPrefix": "data/out/experiment", + "IATDistribution": "exponential", + "CPULimit": "1vCPU", + "ExperimentDuration": 5, + "WarmupDuration": 0, + + "IsPartiallyPanic": false, + "EnableZipkinTracing": false, + "EnableMetricsScrapping": false, + "MetricScrapingPeriodSeconds": 15, + "AutoscalingMetric": "concurrency", + + "GRPCConnectionTimeoutSeconds": 15, + "GRPCFunctionTimeoutSeconds": 900, + + "DAGMode": false +} \ No newline at end of file diff --git a/cmd/loader.go b/cmd/loader.go index d2f20683a..fd586182d 100644 --- a/cmd/loader.go +++ b/cmd/loader.go @@ -95,6 +95,7 @@ func main() { common.PlatformOpenWhisk, common.PlatformAWSLambda, common.PlatformDirigent, + common.PlatformAzureFunctions, } if !slices.Contains(supportedPlatforms, cfg.Platform) { log.Fatal("Unsupported platform!") @@ -149,7 +150,7 @@ func parseYAMLSpecification(cfg *config.LoaderConfiguration) string { case "firecracker": return "workloads/firecracker/trace_func_go.yaml" default: - if cfg.Platform != common.PlatformDirigent { + if cfg.Platform != common.PlatformDirigent && cfg.Platform != common.PlatformAzureFunctions { log.Fatal("Invalid 'YAMLSelector' parameter.") } } diff --git a/docs/loader.md b/docs/loader.md index afa2d07a8..2cbe12680 100644 --- a/docs/loader.md +++ b/docs/loader.md @@ -284,4 +284,61 @@ Note: - Under `Manage Quota`, select `AWS Lambda` service and click `View quotas` (Alternatively, click [here](https://us-east-1.console.aws.amazon.com/servicequotas/home/services/lambda/quotas)) - 
Under `Quota name`, select `Concurrent executions` and click `Request increase at account level` (Alternatively, click [here](https://us-east-1.console.aws.amazon.com/servicequotas/home/services/lambda/quotas/L-B99A9384)) - Under `Increase quota value`, input `1000` and click `Request` - - Await AWS Support Team to approve the request. The request may take several days or weeks to be approved. \ No newline at end of file + - Await AWS Support Team to approve the request. The request may take several days or weeks to be approved. + +## Using Azure Functions + +**Pre-requisites:** +1. Microsoft Azure account with an active subscription ID +2. Existing Service Principal for authentication (refer to Notes section) +3. Go installed +4. Python3 installed + +**Quick Setup for Azure Deployment:** +1. Install the Azure CLI and verify installation: + ```bash + curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash + az --version + ``` +2. Use existing Service Principal credentials in order to log in to Azure. + ```bash + az login --service-principal --username $AZURE_APP_ID --password $AZURE_PASSWORD --tenant $AZURE_TENANT + ``` + > Refer to Notes section for generation of Service Principal credentials +3. Start the Azure Functions deployment experiment: + ```bash + go run cmd/loader.go --config cmd/config_azure_trace.json + ``` +--- +Notes: + +- A Service Principal must be created before running the experiment, as some environments (e.g., CloudLab nodes) do not support interactive login or browser-based authentication. You can create the required Service Principal using the Azure Portal. +1. Login to [Azure Portal](https://portal.azure.com) and search for **"App registrations"**. +2. Click **+ New registration**: + - Name: `InVitro` + - Supported account types: *Single tenant* (default) + - Skip Redirect URI + - Click **Register** +3. 
Once registered, note down the following: + - **Application (client) ID** → This is your `AZURE_APP_ID` + - **Directory (tenant) ID** → This is your `AZURE_TENANT` +4. Click **Add a certificate or secret** + - Click **+ New client secret** + - Add a description and choose an expiry (e.g., 6 months) + - Click **Add** + - Copy the secret **value** → This is your `AZURE_PASSWORD` +5. Assign roles: + - Go to **Subscriptions** → Select your subscription + - Click **Access Control (IAM)** → **+ Add** → **Add role assignment** + - Choose **Contributor** under "Privileged administrator roles" + - Assign access to: *User, group, or service principal* + - Under "Members", search for your new app registration (`InVitro`) + - Click **Review + Assign** +6. Set the following environment variables in your experiment environment: + ```bash + export AZURE_APP_ID= + export AZURE_PASSWORD= + export AZURE_TENANT= + ``` +- Current deployment is via ZIP +- Python is used for deployment workload as Go is not supported in Consumption Plan \ No newline at end of file diff --git a/docs/multi_loader.md b/docs/multi_loader.md index 7a66350ea..75e459289 100644 --- a/docs/multi_loader.md +++ b/docs/multi_loader.md @@ -31,7 +31,7 @@ More information regarding the metrics that can be collected at the end of each - **activator** – Captures Knative Activator logs which includes health and readiness status updates for service endpoints it manages - **autoscaler** – Tracks Knative autoscaler decisions, scaling events, and resource utilization - **top** – Provides CPU and memory usage statistics for each nodes -- **prometheus** – Captures a snapshot of Prometheus’s TSDB, which includes system-wide performance metrics. The snapshot can be restored in an active Prometheus instance for further analysis. For details, see the *Restore Prometheus Data* section in [this guide](https://devopstales.github.io/home/backup-and-retore-prometheus/). 
+- **prometheus** – Captures a snapshot of Prometheus’s TSDB, which includes system-wide performance metrics. The snapshot can be restored in an active Prometheus instance for further analysis. For details, see the *Restore Prometheus Data* section in [this guide](https://devopstales.github.io/kubernetes/backup-and-retore-prometheus/). ### LoaderStudy diff --git a/pkg/common/constants.go b/pkg/common/constants.go index d6e1ecc47..381e78afa 100644 --- a/pkg/common/constants.go +++ b/pkg/common/constants.go @@ -108,10 +108,11 @@ var ValidCPULimits = []string{CPULimit1vCPU, CPULimitGCP} // platform const ( - PlatformKnative string = "knative" - PlatformDirigent string = "dirigent" - PlatformOpenWhisk string = "openwhisk" - PlatformAWSLambda string = "awslambda" + PlatformKnative string = "knative" + PlatformDirigent string = "dirigent" + PlatformOpenWhisk string = "openwhisk" + PlatformAWSLambda string = "awslambda" + PlatformAzureFunctions string = "azurefunctions" ) // dirigent backend diff --git a/pkg/common/utilities.go b/pkg/common/utilities.go index 9c0f33174..41df445c0 100644 --- a/pkg/common/utilities.go +++ b/pkg/common/utilities.go @@ -27,8 +27,10 @@ package common import ( "encoding/json" "hash/fnv" + "io" "log" "math/rand" + "os" "os/exec" "strconv" "strings" @@ -189,3 +191,25 @@ func ParseLogMessage(logString string) string { } return logString } + +// Helper function to copy files +func CopyFile(src, dst string) error { + sourceFile, err := os.Open(src) + if err != nil { + return err + } + defer sourceFile.Close() + + destFile, err := os.Create(dst) + if err != nil { + return err + } + defer destFile.Close() + + _, err = io.Copy(destFile, sourceFile) + if err != nil { + return err + } + + return destFile.Sync() +} diff --git a/pkg/driver/clients/azure_client.go b/pkg/driver/clients/azure_client.go new file mode 100644 index 000000000..1685be519 --- /dev/null +++ b/pkg/driver/clients/azure_client.go @@ -0,0 +1,126 @@ +package clients + +import ( + "bytes" 
+ "encoding/json" + "fmt" + "io" + "net/http" + "sync" + "time" + + log "github.com/sirupsen/logrus" + "github.com/vhive-serverless/loader/pkg/common" + mc "github.com/vhive-serverless/loader/pkg/metric" +) + +type azureFunctionsInvoker struct { + announceDoneExe *sync.WaitGroup +} + +func newAzureFunctionsInvoker(announceDoneExe *sync.WaitGroup) *azureFunctionsInvoker { + return &azureFunctionsInvoker{ + announceDoneExe: announceDoneExe, + } +} + +func (i *azureFunctionsInvoker) Invoke(function *common.Function, runtimeSpec *common.RuntimeSpecification) (bool, *mc.ExecutionRecord) { + log.Tracef("(Invoke)\t %s: %d[ms], %d[MiB]", function.Name, runtimeSpec.Runtime, runtimeSpec.Memory) + + dataString := fmt.Sprintf(`{"RuntimeInMilliSec": %d, "MemoryInMebiBytes": %d}`, runtimeSpec.Runtime, runtimeSpec.Memory) + success, executionRecordBase, res, bodyBytes := azureHttpInvocation(dataString, function) + + executionRecordBase.RequestedDuration = uint32(runtimeSpec.Runtime * 1e3) + record := &mc.ExecutionRecord{ExecutionRecordBase: *executionRecordBase} + + if !success { + return false, record + } + + // Create a variable to store the JSON data + var httpResBody HTTPResBody + // Unmarshal the response body into the JSON object + if err := json.Unmarshal(bodyBytes, &httpResBody); err != nil { + log.Errorf("Error unmarshaling JSON:%s", err) + return false, record + } + + record.ActualDuration = httpResBody.DurationInMicroSec + record.ActualMemoryUsage = common.Kib2Mib(httpResBody.MemoryUsageInKb) + + logInvocationSummary(function, &record.ExecutionRecordBase, res) + + return true, record +} + +func azureHttpInvocation(dataString string, function *common.Function) (bool, *mc.ExecutionRecordBase, *http.Response, []byte) { + record := &mc.ExecutionRecordBase{} + + start := time.Now() + record.StartTime = start.UnixMicro() + record.Instance = function.Name + requestURL := function.Endpoint + + // Prepare request body for POST + reqBody := bytes.NewBuffer([]byte(dataString)) + 
+ // Use POST method with JSON payload as body + req, err := http.NewRequest(http.MethodPost, requestURL, reqBody) + if err != nil { + log.Errorf("http request creation failed for function %s - %v", function.Name, err) + + record.ResponseTime = time.Since(start).Microseconds() + record.ConnectionTimeout = true + + return false, record, nil, nil + } + + req.Header.Set("Content-Type", "application/json") // JSON payload for POST + + resp, err := http.DefaultClient.Do(req) + if err != nil { + log.Errorf("http request for function %s failed - %v", function.Name, err) + + record.ResponseTime = time.Since(start).Microseconds() + record.ConnectionTimeout = true + + return false, record, resp, nil + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + log.Errorf("Received non-2xx status code for function %s - error code: %s", function.Name, resp.Status) + + record.ResponseTime = time.Since(start).Microseconds() + record.ConnectionTimeout = true + + return false, record, resp, nil + } + + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + log.Errorf("Failed to read response body for function %s - %v", function.Name, err) + + record.ResponseTime = time.Since(start).Microseconds() + record.FunctionTimeout = true + + return false, record, resp, nil + } + + var deserializedResponse FunctionResponse + err = json.Unmarshal(bodyBytes, &deserializedResponse) + if err != nil { + log.Errorf("Failed to deserialize response %s - %v", function.Name, err) + + record.ResponseTime = time.Since(start).Microseconds() + record.FunctionTimeout = true + + return false, record, resp, nil + } + + record.Instance = deserializedResponse.Function + record.ResponseTime = time.Since(start).Microseconds() + record.ActualDuration = uint32(deserializedResponse.ExecutionTime) + + return true, record, resp, bodyBytes +} diff --git a/pkg/driver/clients/invoker.go b/pkg/driver/clients/invoker.go index 82d1e5137..6159841ed 100644 --- a/pkg/driver/clients/invoker.go 
+++ b/pkg/driver/clients/invoker.go @@ -18,6 +18,8 @@ func CreateInvoker(cfg *config.Configuration, announceDoneExe *sync.WaitGroup, r switch strings.ToLower(cfg.LoaderConfiguration.Platform) { case common.PlatformAWSLambda: return newAWSLambdaInvoker(announceDoneExe) + case common.PlatformAzureFunctions: + return newAzureFunctionsInvoker(announceDoneExe) case common.PlatformDirigent: if cfg.DirigentConfiguration == nil { logrus.Fatal("Failed to create invoker: dirigent configuration is required for platform 'dirigent'") diff --git a/pkg/driver/deployment/azure_functions.go b/pkg/driver/deployment/azure_functions.go new file mode 100644 index 000000000..0676cde98 --- /dev/null +++ b/pkg/driver/deployment/azure_functions.go @@ -0,0 +1,409 @@ +package deployment + +import ( + "fmt" + "os" + "os/exec" + "time" + + "path/filepath" + + log "github.com/sirupsen/logrus" + "github.com/vhive-serverless/loader/pkg/common" + "github.com/vhive-serverless/loader/pkg/config" + "gopkg.in/yaml.v3" +) + +// Config struct to hold Azure Function deployment configuration +type Config struct { + AzureConfig struct { + ResourceGroup string `yaml:"resource_group"` + StorageAccountName string `yaml:"storage_account_name"` + FunctionAppName string `yaml:"function_app_name"` + Location string `yaml:"location"` + } `yaml:"azurefunctionsconfig"` +} + +type azureFunctionsDeployer struct { + functions []*common.Function + config *Config +} + +func newAzureFunctionsDeployer() *azureFunctionsDeployer { + return &azureFunctionsDeployer{} +} + +func (afd *azureFunctionsDeployer) Deploy(cfg *config.Configuration) { + afd.functions = cfg.Functions + afd.config = DeployAzureFunctions(afd.functions) +} + +func (afd *azureFunctionsDeployer) Clean() { + CleanAzureFunctions(afd.config, afd.functions) +} + +func DeployAzureFunctions(functions []*common.Function) *Config { + // 1. Copy exec_func.py to azurefunctions_setup + // 2. Initialize resources required for Azure Functions deployment + // 3. 
Create function folders + // 4. Zip function folders + // 5. Deploy the functions to Azure Functions + + // Load azurefunctionsconfig yaml file + config, err := LoadConfig("azurefunctions_setup/azurefunctionsconfig.yaml") + if err != nil { + log.Fatalf("Error loading azure functions config yaml: %s", err) + } + + // Set unique names for Azure Resources + timestamp := time.Now().Format("150405") // HHMMSS format + config.AzureConfig.ResourceGroup = fmt.Sprintf("%s-%s", config.AzureConfig.ResourceGroup, timestamp) // invitro-rg-XXXXXX + config.AzureConfig.StorageAccountName = fmt.Sprintf("%s%s", config.AzureConfig.StorageAccountName, timestamp) // invitrostorageXXXXXX + config.AzureConfig.FunctionAppName = fmt.Sprintf("%s-%s", config.AzureConfig.FunctionAppName, timestamp) // invitro-functionapp-XXXXXX + + // Define the base directory containing functions to be zipped individually + baseDir := "azure_functions_for_zip" + sharedWorkloadDir := filepath.Join("azurefunctions_setup", "shared_azure_workload") + zipBaseDir := "." + + // 1. Run script to copy workload + if err := CopyPythonWorkload("server/trace-func-py/exec_func.py", "azurefunctions_setup/shared_azure_workload/exec_func.py"); err != nil { + log.Fatalf("Error copying Python workload: %s", err) + } + + // 2. Initialize resources required for Azure Functions deployment + InitAzureFunctions(config, functions) + + // 3. Create function folders + if err := CreateFunctionFolders(baseDir, sharedWorkloadDir, functions); err != nil { + log.Fatalf("Error setting up function folders required for zipping: %s", err) + } + + // 4. Zip function folders + if err := ZipFunctionAppFiles(baseDir, functions); err != nil { + log.Fatalf("Error zipping function app files for deployment: %s", err) + } + + // 5. 
Deploy the function to Azure Functions + if err := DeployFunctions(config, zipBaseDir, functions); err != nil { + log.Fatalf("Error deploying function: %s", err) + } + + return config +} + +func CleanAzureFunctions(config *Config, functions []*common.Function) { + log.Infof("Performing cleanup of experiment...") + + baseDir := "azure_functions_for_zip" + + // Call the cleanup function to delete temp folders and files + if err := CleanUpDeploymentFiles(baseDir, functions); err != nil { + log.Errorf("Error during cleanup of local files: %s", err) + } else { + log.Debug("Cleanup of temp folders zip files completed successfully.") + } + + // Delete Azure resources + if err := DeleteResourceGroup(config); err != nil { + log.Errorf("Error during Azure resource cleanup: %v", err) + } else { + log.Infof("Cleanup completed successfully.") + } +} + +/* Copy the exec_func.py file to the destination using the CopyFile function from utilities */ +func CopyPythonWorkload(srcPath, dstPath string) error { + log.Infof("Copying workload...") + if err := common.CopyFile(srcPath, dstPath); err != nil { + return fmt.Errorf("failed to copy exec_func.py to %s: %w", dstPath, err) + } + log.Infof("Workload copied successfully.") + return nil +} + +/* Functions for initializing resources required for Azure Functions deployment */ + +func InitAzureFunctions(config *Config, functions []*common.Function) { + // 1. Create Resource Group + // 2. Create Storage Account + // 3. Create Function Apps + Set Settings For Each App + + // 1. Create Resource Group + if err := CreateResourceGroup(config); err != nil { + log.Fatalf("Error during Resource Group creation: %s", err) + } + + // 2. Create Storage Account + if err := CreateStorageAccount(config); err != nil { + + cleanupErr := DeleteResourceGroup(config) + if cleanupErr != nil { + log.Errorf("Failed to delete resource group during cleanup: %v", cleanupErr) + } + + log.Fatalf("Error during Storage Account creation: %s", err) + } + + // 3. 
Create Function Apps + Set Settings For Each App + for i := 0; i < len(functions); i++ { + functionAppName := fmt.Sprintf("%s-%d", config.AzureConfig.FunctionAppName, i) + + if err := CreateFunctionApp(config, functionAppName); err != nil { + + cleanupErr := DeleteResourceGroup(config) + if cleanupErr != nil { + log.Errorf("Failed to delete resource group during cleanup: %v", cleanupErr) + } + + log.Fatalf("Error during Function App creation: %s", err) + } + + // Set SCM_DO_BUILD_DURING_DEPLOYMENT + if err := SetSCMSettings(config, functionAppName); err != nil { + + cleanupErr := DeleteResourceGroup(config) + if cleanupErr != nil { + log.Errorf("Failed to delete resource group during cleanup: %v", cleanupErr) + } + + log.Fatalf("failed to set SCM settings: %s", err) + } + + // Set ENABLE_ORYX_BUILD + if err := SetORYXSettings(config, functionAppName); err != nil { + + cleanupErr := DeleteResourceGroup(config) + if cleanupErr != nil { + log.Errorf("Failed to delete resource group during cleanup: %v", cleanupErr) + } + + log.Fatalf("failed to set Oryx settings: %s", err) + } + } + + log.Info("Azure Functions environment for deployment initialized successfully.") +} + +// LoadConfig reads the YAML configuration file +func LoadConfig(filePath string) (*Config, error) { + config := &Config{} + data, err := os.ReadFile(filePath) + if err != nil { + return nil, err + } + err = yaml.Unmarshal(data, config) + return config, err +} + +// CreateResourceGroup creates an Azure Resource Group: {az group create --name --location } +func CreateResourceGroup(config *Config) error { + createResourceGroupCmd := exec.Command("az", "group", "create", + "--name", config.AzureConfig.ResourceGroup, + "--location", config.AzureConfig.Location) + + if err := createResourceGroupCmd.Run(); err != nil { + return fmt.Errorf("failed to create resource group: %w", err) + } + + log.Debugf("Resource group %s created successfully.", config.AzureConfig.ResourceGroup) + return nil +} + +// 
CreateStorageAccount creates an Azure Storage Account : {az storage account create --name --resource-group --location --sku Standard_LRS} +func CreateStorageAccount(config *Config) error { + cmd := exec.Command("az", "storage", "account", "create", + "--name", config.AzureConfig.StorageAccountName, + "--resource-group", config.AzureConfig.ResourceGroup, + "--location", config.AzureConfig.Location, + "--sku", "Standard_LRS") + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to create storage account: %w", err) + } + + log.Debugf("Storage account %s created successfully.", config.AzureConfig.StorageAccountName) + return nil +} + +// CreateFunctionApp creates an Azure Function App: {az functionapp create --name --resource-group --storage-account --consumption-plan-location --runtime python --runtime-version 3.10 --os-type linux --functions-version 4} +func CreateFunctionApp(config *Config, functionAppName string) error { + cmd := exec.Command("az", "functionapp", "create", + "--name", functionAppName, + "--resource-group", config.AzureConfig.ResourceGroup, + "--storage-account", config.AzureConfig.StorageAccountName, + "--consumption-plan-location", config.AzureConfig.Location, + "--runtime", "python", + "--runtime-version", "3.10", + "--os-type", "linux", + "--functions-version", "4") + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to create function app: %w", err) + } + + log.Infof("Function app %s created successfully.", functionAppName) + return nil +} + +// SetSCMSettings configures remote build settings for the Azure Function App +func SetSCMSettings(config *Config, functionAppName string) error { + cmd := exec.Command("az", "functionapp", "config", "appsettings", "set", + "--name", functionAppName, + "--resource-group", config.AzureConfig.ResourceGroup, + "--settings", "SCM_DO_BUILD_DURING_DEPLOYMENT=true") + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to set SCM_DO_BUILD_DURING_DEPLOYMENT: %w", err) + } + + 
log.Debugf("SCM_DO_BUILD_DURING_DEPLOYMENT setting configured successfully.") + return nil +} + +// SetORYXSettings configures remote build settings for the Azure Function App +func SetORYXSettings(config *Config, functionAppName string) error { + cmd := exec.Command("az", "functionapp", "config", "appsettings", "set", + "--name", functionAppName, + "--resource-group", config.AzureConfig.ResourceGroup, + "--settings", "ENABLE_ORYX_BUILD=true") + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to set ENABLE_ORYX_BUILD: %w", err) + } + + log.Debugf("ENABLE_ORYX_BUILD setting configured successfully.") + return nil +} + +/* Function to create folders and copy files to the folders */ + +func CreateFunctionFolders(baseDir, sharedWorkloadDir string, functions []*common.Function) error { + for i := 0; i < len(functions); i++ { + folderName := fmt.Sprintf("function%d", i) + folderPath := filepath.Join(baseDir, folderName) + + // Create the function folder + if err := os.MkdirAll(folderPath, os.ModePerm); err != nil { + return fmt.Errorf("failed to create folder %s: %w", folderPath, err) + } + + // Build full paths to shared workload files + workloadPy := filepath.Join(sharedWorkloadDir, "azurefunctionsworkload.py") + execFuncPy := filepath.Join(sharedWorkloadDir, "exec_func.py") + functionJSON := filepath.Join(sharedWorkloadDir, "function.json") + + // Copy files into the function folder + if err := common.CopyFile(workloadPy, filepath.Join(folderPath, "azurefunctionsworkload.py")); err != nil { + return fmt.Errorf("failed to copy azurefunctionsworkload.py to %s: %w", folderPath, err) + } + if err := common.CopyFile(execFuncPy, filepath.Join(folderPath, "exec_func.py")); err != nil { + return fmt.Errorf("failed to copy exec_func.py to %s: %w", folderPath, err) + } + if err := common.CopyFile(functionJSON, filepath.Join(folderPath, "function.json")); err != nil { + return fmt.Errorf("failed to copy function.json to %s: %w", folderPath, err) + } + } + + 
log.Debugf("Created %d function folders under %s", len(functions), baseDir) + return nil +} + +/* Functions for zipping created function folders */ + +func ZipFunctionAppFiles(baseDir string, functions []*common.Function) error { + for i := 0; i < len(functions); i++ { + folderName := fmt.Sprintf("function%d", i) + folderPath := filepath.Join(baseDir, folderName) + zipFileName := fmt.Sprintf("function%d.zip", i) + + // Use bash to zip the contents of azure_functions_for_zip/ along with host.json and requirements.txt + cmd := exec.Command("bash", "-c", + fmt.Sprintf("cd %s && zip -r ../%s %s && cd .. && zip -j %s azurefunctions_setup/host.json azurefunctions_setup/requirements.txt", baseDir, zipFileName, folderName, zipFileName)) + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to zip function folder %s into %s: %w", folderPath, zipFileName, err) + } + log.Debugf("Created zip file %s for function folder %s", zipFileName, folderName) + + } + log.Infof("Successfully zipped %d functions.", len(functions)) + return nil +} + +/* Function for deploying zipped functions */ + +func DeployFunctions(config *Config, baseDir string, functions []*common.Function) error { + log.Infof("Deploying %d functions to Azure Function Apps...", len(functions)) + + for i := 0; i < len(functions); i++ { + functionAppName := fmt.Sprintf("%s-%d", config.AzureConfig.FunctionAppName, i) + zipFileName := fmt.Sprintf("function%d.zip", i) + zipFilePath := filepath.Join(baseDir, zipFileName) + + // Deploy the zip file to Azure Function App using CLI + cmd := exec.Command("az", "functionapp", "deployment", "source", "config-zip", + "--name", functionAppName, + "--resource-group", config.AzureConfig.ResourceGroup, + "--src", zipFilePath, + "--build-remote", "true") + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to deploy %s to function app %s: %w", zipFilePath, functionAppName, err) + } + + // Storing endpoint for each function + functions[i].Endpoint = 
fmt.Sprintf("https://%s.azurewebsites.net/api/function%d", functionAppName, i) + log.Infof("Function %s set to %s", functions[i].Name, functions[i].Endpoint) + + } + log.Infof("Successfully deployed all %d functions.", len(functions)) + return nil +} + +/* Functions for clean up */ + +// Clean up temporary files and folders after deployment +func CleanUpDeploymentFiles(baseDir string, functions []*common.Function) error { + + // Remove the base directory containing function folders + if err := os.RemoveAll(baseDir); err != nil { + return fmt.Errorf("failed to remove directory %s: %w", baseDir, err) + } + log.Debugf("Successfully removed directory: %s", baseDir) + + // Remove each individual function zip file + for i := 0; i < len(functions); i++ { + zipFileName := fmt.Sprintf("function%d.zip", i) + if err := os.Remove(zipFileName); err != nil { + return fmt.Errorf("failed to remove zip file %s: %w", zipFileName, err) + } + log.Debugf("Successfully removed zip file: %s", zipFileName) + } + + // Remove the copied exec_func.py from shared workload + execFuncPath := "azurefunctions_setup/shared_azure_workload/exec_func.py" + if err := os.Remove(execFuncPath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to remove copied exec_func.py: %w", err) + } + log.Debugf("Successfully removed copied exec_func.py from shared workload.") + return nil +} + +// DeleteResourceGroup deletes the Azure Resource Group +func DeleteResourceGroup(config *Config) error { + + // Construct the Azure CLI command to delete the resource group + dltResourceGrpCmd := exec.Command("az", "group", "delete", + "--name", config.AzureConfig.ResourceGroup, // Resource group name + "--yes", // Skip confirmation prompt + "--no-wait") + + // Execute the command + if err := dltResourceGrpCmd.Run(); err != nil { + return fmt.Errorf("failed to delete resource group: %w", err) + } + + log.Debugf("Resource group %s deleted successfully.", config.AzureConfig.ResourceGroup) + return nil +} diff 
--git a/pkg/driver/deployment/azure_functions_test.go b/pkg/driver/deployment/azure_functions_test.go new file mode 100644 index 000000000..59d38e909 --- /dev/null +++ b/pkg/driver/deployment/azure_functions_test.go @@ -0,0 +1,361 @@ +package deployment_test + +import ( + "archive/zip" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/vhive-serverless/loader/pkg/common" + "github.com/vhive-serverless/loader/pkg/driver/deployment" +) + +/* --- TEST CASES --- */ + +// Tests copying of exec_func.py file and verifies the contents. +func TestCopyPythonWorkload(t *testing.T) { + + // Get current working directory + cwd, err := os.Getwd() + assert.NoError(t, err) + + // Construct root directory path + root := filepath.Join(cwd, "..", "..", "..") + + // Set source and destination paths for exec_func.py + srcPath := filepath.Join(root, "server", "trace-func-py", "exec_func.py") + dstPath := filepath.Join(root, "azurefunctions_setup", "shared_azure_workload", "exec_func.py") + + // Read content of source file + srcContent, err := os.ReadFile(srcPath) + assert.NoError(t, err) + + // Copy the file to destination + err = deployment.CopyPythonWorkload(srcPath, dstPath) + assert.NoError(t, err) + + // Ensure destination file is cleaned up automatically + t.Cleanup(func() { + _ = os.Remove(dstPath) + }) + + // Read content of destination file + dstContent, err := os.ReadFile(dstPath) + assert.NoError(t, err) + + // Verify the contents are the same + assert.Equal(t, string(srcContent), string(dstContent)) +} + +// Tests zip file contains the correct files and directory structure. 
+func TestZipHealth(t *testing.T) { + + // Get current working directory + cwd, err := os.Getwd() + assert.NoError(t, err) + + // Construct root directory path + root := filepath.Join(cwd, "..", "..", "..") + + // Set source and destination paths for exec_func.py + srcPath := filepath.Join(root, "server", "trace-func-py", "exec_func.py") + dstPath := filepath.Join(root, "azurefunctions_setup", "shared_azure_workload", "exec_func.py") + + baseDir := filepath.Join(root, "azure_functions_for_zip") + sharedWorkloadDir := filepath.Join(root, "azurefunctions_setup", "shared_azure_workload") + expectedFunctionCount := 2 + + expectedFunctionFiles := []string{ + "azurefunctionsworkload.py", + "exec_func.py", + "function.json", + } + + expectedRootFiles := []string{ + "requirements.txt", + "host.json", + } + + // Create test functions + functions := []*common.Function{ + {Name: "function0"}, + {Name: "function1"}, + } + + defer cleanupTestArtifacts(t, baseDir, functions) + + // Copy the file to destination + err = deployment.CopyPythonWorkload(srcPath, dstPath) + assert.NoError(t, err) + + // Create function folders + err = deployment.CreateFunctionFolders(baseDir, sharedWorkloadDir, functions) + assert.NoError(t, err, "Failed to create function folders") + + // Zip the function app files (each function separately) + err = deployment.ZipFunctionAppFiles(baseDir, functions) + assert.NoError(t, err, "Failed to create function app zips") + + // Validate each zip file + for i := 0; i < expectedFunctionCount; i++ { + zipFilePath := filepath.Join(root, fmt.Sprintf("function%d.zip", i)) + + // Check if the zip file exists + if _, err := os.Stat(zipFilePath); os.IsNotExist(err) { + t.Fatalf("Zip file does not exist: %s", zipFilePath) + } + + // Open zip file + r, err := zip.OpenReader(zipFilePath) + assert.NoError(t, err, "Failed to open zip file") + defer r.Close() + + // Prepare expected files map + expectedFiles := make(map[string]bool) + functionFolder := 
fmt.Sprintf("function%d/", i) + + for _, file := range expectedFunctionFiles { + expectedFiles[functionFolder+file] = false + } + for _, file := range expectedRootFiles { + expectedFiles[file] = false + } + + // Check the files inside the zip + for _, f := range r.File { + filePath := f.Name + filePath = strings.TrimPrefix(filePath, "./") // Normalize path + + if _, exists := expectedFiles[filePath]; exists { + expectedFiles[filePath] = true + } + } + + // Ensure all expected files are present + for file, found := range expectedFiles { + assert.True(t, found, "Missing expected file in zip: "+file) + } + + } + + t.Log("Zip file structure validation passed!") +} + +// Tests loading of config file in /azurefunctions_setup. +func TestLoadConfig(t *testing.T) { + + // Get current working directory + cwd, err := os.Getwd() + assert.NoError(t, err) + + // Construct root directory path + root := filepath.Join(cwd, "..", "..", "..") + + // Set path to config file + configPath := filepath.Join(root, "azurefunctions_setup", "azurefunctionsconfig.yaml") + + t.Logf("Looking for config at: %s", configPath) + + config, err := deployment.LoadConfig(configPath) + require.NoError(t, err, "Failed to load config") + + assert.NotEmpty(t, config.AzureConfig.ResourceGroup) + assert.NotEmpty(t, config.AzureConfig.StorageAccountName) + assert.NotEmpty(t, config.AzureConfig.FunctionAppName) + assert.NotEmpty(t, config.AzureConfig.Location) + + t.Log("Config loaded and validated successfully!") +} + +// Tests Azure infrastructure creation (Resource Group, Storage Account, Function App). 
+func TestAzureInfra(t *testing.T) { + + config, functionAppName := setupConfig(t) + defer cleanupAzureResources(t, config) + + err := deployment.CreateResourceGroup(config) + require.NoError(t, err, "Failed to create Resource Group") + + err = deployment.CreateStorageAccount(config) + require.NoError(t, err, "Failed to create Storage Account") + + err = deployment.CreateFunctionApp(config, functionAppName) + require.NoError(t, err, "Failed to create Function App") + + t.Logf("Function App %s created successfully!", functionAppName) +} + +// Tests deployment of zipped function to Azure Function App. +func TestDeployFunction(t *testing.T) { + + // Get current working directory + cwd, err := os.Getwd() + assert.NoError(t, err) + + // Construct root directory path + root := filepath.Join(cwd, "..", "..", "..") + + // Setup Azure login and config + config, functionAppName := setupConfig(t) + + // Prepare local zipped workload + functions := prepareZipFile(t) + + // Cleanup after test + defer cleanupTestArtifacts(t, "azure_functions_for_zip_test", functions) + defer cleanupAzureResources(t, config) + + err = deployment.CreateResourceGroup(config) + require.NoError(t, err, "Failed to create Resource Group") + + err = deployment.CreateStorageAccount(config) + require.NoError(t, err, "Failed to create Storage Account") + + err = deployment.CreateFunctionApp(config, functionAppName) + require.NoError(t, err, "Failed to create Function App") + + // Set SCM and ORYX settings for proper Azure deployment + err = deployment.SetSCMSettings(config, functionAppName) + require.NoError(t, err, "Failed to set SCM_DO_BUILD_DURING_DEPLOYMENT") + + err = deployment.SetORYXSettings(config, functionAppName) + require.NoError(t, err, "Failed to set ENABLE_ORYX_BUILD") + + // Deploy zip file to Function App + err = deployment.DeployFunctions(config, root, functions) + require.NoError(t, err, "Failed to deploy function") + + t.Log("Function deployment successful!") + + // Check if the 
endpoint is correctly set + expectedEndpoint := fmt.Sprintf("https://%s.azurewebsites.net/api/function%d", functionAppName, 0) + assert.Equal(t, expectedEndpoint, functions[0].Endpoint, "Function endpoint does not match expected") +} + +/* --- HELPER FUNCTIONS --- */ + +// Azure login +func setupAzureLogin(t *testing.T) { + + appID := os.Getenv("AZURE_APP_ID") + password := os.Getenv("AZURE_PASSWORD") + tenantID := os.Getenv("AZURE_TENANT") + + require.NotEmpty(t, appID, "AZURE_APP_ID must be set") + require.NotEmpty(t, password, "AZURE_PASSWORD must be set") + require.NotEmpty(t, tenantID, "AZURE_TENANT must be set") + + cmd := exec.Command("az", "login", + "--service-principal", + "--username", appID, + "--password", password, + "--tenant", tenantID, + ) + + err := cmd.Run() + require.NoError(t, err, "Azure login failed") + + t.Log("Azure login successful!") +} + +// Setup config (login, load config) +func setupConfig(t *testing.T) (*deployment.Config, string) { + + // Get current working directory + cwd, err := os.Getwd() + assert.NoError(t, err) + + // Construct root directory path + root := filepath.Join(cwd, "..", "..", "..") + + // Set path to config file + configPath := filepath.Join(root, "azurefunctions_setup", "azurefunctionsconfig.yaml") + + // Perform Azure login + setupAzureLogin(t) + + // Load config + config, err := deployment.LoadConfig(configPath) + require.NoError(t, err, "Failed to load config") + + // Set unique names for Azure Resources + timestamp := time.Now().Format("150405") // HHMMSS format + config.AzureConfig.ResourceGroup = fmt.Sprintf("unit-rg-%s", timestamp) + config.AzureConfig.StorageAccountName = fmt.Sprintf("unitstore%s", timestamp) + config.AzureConfig.FunctionAppName = fmt.Sprintf("unit-funcapp-%s", timestamp) + + functionAppName := fmt.Sprintf("%s-0", config.AzureConfig.FunctionAppName) + t.Log("Azure login and config setup completed.") + + return config, functionAppName +} + +// Prepare zipped function +func prepareZipFile(t 
*testing.T) []*common.Function { + + // Get current working directory + cwd, err := os.Getwd() + assert.NoError(t, err) + + // Construct root directory path + root := filepath.Join(cwd, "..", "..", "..") + + baseDir := filepath.Join(root, "azure_functions_for_zip_test") + sharedWorkloadDir := filepath.Join(root, "azurefunctions_setup", "shared_azure_workload") + srcPath := filepath.Join(root, "server", "trace-func-py", "exec_func.py") + dstPath := filepath.Join(sharedWorkloadDir, "exec_func.py") + + functions := []*common.Function{ + {Name: "testfunction0"}, + } + + err = deployment.CopyPythonWorkload(srcPath, dstPath) + require.NoError(t, err, "Failed to copy exec_func.py") + + err = deployment.CreateFunctionFolders(baseDir, sharedWorkloadDir, functions) + require.NoError(t, err, "Failed to create function folders") + + err = deployment.ZipFunctionAppFiles(baseDir, functions) + require.NoError(t, err, "Failed to create zip files") + + t.Log("Zip preparation completed!") + + return functions +} + +// Cleanup Azure RG +func cleanupAzureResources(t *testing.T, config *deployment.Config) { + + t.Logf("Cleaning up Resource Group: %s", config.AzureConfig.ResourceGroup) + err := deployment.DeleteResourceGroup(config) + assert.NoError(t, err, "Failed to cleanup Resource Group") +} + +// Local files cleanup +func cleanupTestArtifacts(t *testing.T, baseDir string, functions []*common.Function) { + + // Remove the copied exec_func.py from shared workload + execFuncPath := filepath.Join("azurefunctions_setup", "shared_azure_workload", "exec_func.py") + if err := os.Remove(execFuncPath); err != nil && !os.IsNotExist(err) { + t.Logf("Warning: failed to remove exec_func.py: %v", err) + } + + // Remove the test-specific function folders + if err := os.RemoveAll(baseDir); err != nil && !os.IsNotExist(err) { + t.Logf("Warning: failed to remove baseDir %s: %v", baseDir, err) + } + + // Remove the zipped function files (e.g., function0.zip) + for i := range functions { + zip := 
fmt.Sprintf("function%d.zip", i) + if err := os.Remove(zip); err != nil && !os.IsNotExist(err) { + t.Logf("Warning: failed to remove zip file %s: %v", zip, err) + } + } +} diff --git a/pkg/driver/deployment/deployer.go b/pkg/driver/deployment/deployer.go index 47202ee23..4775727e4 100644 --- a/pkg/driver/deployment/deployer.go +++ b/pkg/driver/deployment/deployer.go @@ -15,6 +15,8 @@ func CreateDeployer(cfg *config.Configuration) FunctionDeployer { switch cfg.LoaderConfiguration.Platform { case common.PlatformAWSLambda: return newAWSLambdaDeployer() + case common.PlatformAzureFunctions: + return newAzureFunctionsDeployer() case common.PlatformDirigent: return newDirigentDeployer() case common.PlatformKnative: diff --git a/server/trace-func-py/exec_func.py b/server/trace-func-py/exec_func.py new file mode 100644 index 000000000..68208fd19 --- /dev/null +++ b/server/trace-func-py/exec_func.py @@ -0,0 +1,26 @@ +import math +from time import process_time_ns +from numpy import empty, float32 +from psutil import virtual_memory + + +def execute_function(input, runTime, totalMem): + startTime = process_time_ns() + + chunkSize = 2**10 # size of a kb or 1024 + totalMem = totalMem*(2**10) # convert Mb to kb + memory = virtual_memory() + used = (memory.total - memory.available) // chunkSize # convert to kb + additional = max(1, (totalMem - used)) + array = empty(additional*chunkSize, dtype=float32) # make an uninitialized array of that size, uninitialized to keep it fast + # convert to ns + runTime = (runTime - 1)*(10**6) # -1 because it should be slighly below that runtime + memoryIndex = 0 + while process_time_ns() - startTime < runTime: + for i in range(0, chunkSize): + sin_i = math.sin(i) + cos_i = math.cos(i) + sqrt_i = math.sqrt(i) + array[memoryIndex + i] = sin_i + memoryIndex = (memoryIndex + chunkSize) % additional*chunkSize + return (process_time_ns() - startTime) // 1000 \ No newline at end of file diff --git a/server/trace-func-py/trace_func.py 
b/server/trace-func-py/trace_func.py index 5a2a14487..47c131d5e 100644 --- a/server/trace-func-py/trace_func.py +++ b/server/trace-func-py/trace_func.py @@ -24,15 +24,10 @@ import logging import datetime import grpc -import math -from os import getenv -from time import process_time_ns -from random import seed, randrange -from psutil import virtual_memory -from numpy import empty, float32 import faas_pb2 import faas_pb2_grpc +from exec_func import execute_function class Executor(faas_pb2_grpc.Executor): @@ -43,28 +38,6 @@ def Execute(self, request, context, **kwargs): elapsed_us = int(1000000 * elapsed.total_seconds()) return faas_pb2.FaasReply(latency=elapsed_us, response=str(response)) - -def execute_function(input, runTime, totalMem): - startTime = process_time_ns() - - chunkSize = 2**10 # size of a kb or 1024 - totalMem = totalMem*(2**10) # convert Mb to kb - memory = virtual_memory() - used = (memory.total - memory.available) // chunkSize # convert to kb - additional = max(1, (totalMem - used)) - array = empty(additional*chunkSize, dtype=float32) # make an uninitialized array of that size, uninitialized to keep it fast - # convert to ns - runTime = (runTime - 1)*(10**6) # -1 because it should be slighly bellow that runtime - memoryIndex = 0 - while process_time_ns() - startTime < runTime: - for i in range(0, chunkSize): - sin_i = math.sin(i) - cos_i = math.cos(i) - sqrt_i = math.sqrt(i) - array[memoryIndex + i] = sin_i - memoryIndex = (memoryIndex + chunkSize) % additional*chunkSize - return (process_time_ns() - startTime) // 1000 - def serve(): server = grpc.server(futures.ThreadPoolExecutor(max_workers=1)) faas_pb2_grpc.add_ExecutorServicer_to_server(Executor(), server)