diff --git a/Submissions/002964524_SakhyathaYojani_Tammineni/Assigment1_002964525.ipynb b/Submissions/002964524_SakhyathaYojani_Tammineni/Assigment1_002964525.ipynb new file mode 100644 index 0000000..597b606 --- /dev/null +++ b/Submissions/002964524_SakhyathaYojani_Tammineni/Assigment1_002964525.ipynb @@ -0,0 +1,265 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + } + }, + "cells": [ + { + "cell_type": "code", + "source": [ + "#Brick Pyramid Construction\n", + "def min_rows_to_build_pyramid(n):\n", + " # Initialize variables to keep track of rows and total bricks\n", + " rows = 0\n", + " bricks_used = 0\n", + "\n", + " while bricks_used < n:\n", + " rows += 1\n", + " bricks_used += rows # Calculate the number of bricks in the current row\n", + "\n", + "\n", + " if(n==bricks_used):\n", + " return rows\n", + " return rows - 1\n", + "\n", + "# Example usage:\n", + "result1 = min_rows_to_build_pyramid(7)\n", + "print(result1) # Output: 3\n", + "\n", + "result2 = min_rows_to_build_pyramid(15)\n", + "print(result2) # Output: 5\n", + "\n", + "result3 = min_rows_to_build_pyramid(1)\n", + "print(result3) # Output: 1\n", + "\n", + "result4 = min_rows_to_build_pyramid(20)\n", + "print(result4) # Output: 5\n" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "LqiHEhftfVkA", + "outputId": "22f55000-4828-4195-bcc4-2855e5888a11" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "3\n", + "5\n", + "1\n", + "5\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "#Course Registration Optimization\n", + "\n", + "def stable_matching(student_prefs, course_prefs, course_capacity):\n", + " num_students = len(student_prefs)\n", + " num_courses = len(course_prefs)\n", + "\n", + " # Initialize all students and courses as free and unassigned\n", + " student_free = [True] * num_students\n", + " course_assignment = [-1] * num_courses\n", + "\n", + " while True:\n", + " # Find an unassigned student\n", + " student = -1\n", + " for i in range(num_students):\n", + " if student_free[i]:\n", + " student = i\n", + " break\n", + "\n", + " # If all students are assigned, we have a stable assignment\n", + " if student == -1:\n", + " break\n", + "\n", + " # Iterate through student's preferences\n", + " for i in range(len(student_prefs[student])):\n", + " course = student_prefs[student][i]\n", + "\n", + " # Check if the course is at capacity\n", + " if course_capacity[course] > 0:\n", + " course_capacity[course] -= 1\n", + " course_assignment[course] = student\n", + " student_free[student] = False\n", + " break\n", + "\n", + " # If the course is full, check if the current student prefers it over the current assignment\n", + " for j in range(num_students):\n", + " if course_assignment[course] == student_prefs[course][j]:\n", + " student_prefs[course][j] = -1\n", + " student_free[course_assignment[course]] = True\n", + " course_assignment[course] = student\n", + " student_free[student] = False\n", + " break\n", + "\n", + " return course_assignment\n", + "\n", + "# Example usage\n", + "student_preferences = [\n", + " [0, 1, 2],\n", + " [1, 0, 2],\n", + " [0, 1, 2],\n", + "]\n", + "\n", + "course_preferences = [\n", + " [0, 1, 2],\n", + " [1, 0, 2],\n", + " [0, 1, 2],\n", + "]\n", + "\n", + "course_capacity = [2, 2, 2]\n", + "\n", + "result = stable_matching(student_preferences, course_preferences, course_capacity)\n", + "print(result)\n" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Y6q25uHkH84S", + "outputId": "6746b275-bdfe-46a9-8c10-d6edf8a44a65" + }, + "execution_count": 1, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "[2, 1, -1]\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "#Array Reordering Algorithm\n", + "\n", + "def reorder_array(arr):\n", + " even_nums = []\n", + " odd_nums = []\n", + "\n", + " for num in arr:\n", + " if num % 2 == 0:\n", + " even_nums.append(num)\n", + " else:\n", + " odd_nums.append(num)\n", + "\n", + " even_nums.sort()\n", + " odd_nums.sort()\n", + "\n", + " reordered_arr = even_nums + odd_nums\n", + " return reordered_arr\n", + "\n", + "# Example usage:\n", + "input_arr = [5, 2, 9, 4, 3, 6, 8, 1, 7]\n", + "output_arr = reorder_array(input_arr)\n", + "print(output_arr) # Output: [2, 4, 6, 8, 5, 9, 3, 1, 7]\n" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "9hJdXl8VNEf3", + "outputId": "535684a4-ef45-4180-b428-07b0e82e1b1b" + }, + "execution_count": 2, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "[2, 4, 6, 8, 1, 3, 5, 7, 9]\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "#Shortest Path in a Weighted Directed Graph\n", + "\n", + "import heapq\n", + "\n", + "def shortest_path(graph, source, target):\n", + " num_nodes = len(graph)\n", + " dist = [float('inf')] * num_nodes\n", + " dist[source] = 0\n", + " prev = [None] * num_nodes\n", + " visited = set()\n", + " pq = [(0, source)]\n", + "\n", + " while pq:\n", + " _, u = heapq.heappop(pq)\n", + "\n", + " if u == target:\n", + " path = []\n", + " while u is not None:\n", + " path.append(u)\n", + " u = prev[u]\n", + " return path[::-1]\n", + "\n", + " if u not in visited:\n", + " visited.add(u)\n", + " for v, weight in enumerate(graph[u]):\n", + " if weight > 0 and dist[u] + weight < dist[v]:\n", + " dist[v] = dist[u] + weight\n", + " prev[v] = u\n", + " heapq.heappush(pq, (dist[v], v))\n", + "\n", + " return []\n", + "\n", + "# Example usage:\n", + "graph = [\n", + " [0, 4, 0, 0, 0, 0],\n", + " [0, 0, 8, 0, 0, 0],\n", + " [0, 0, 0, 7, 0, 2],\n", + " [0, 2, 0, 0, 0, 0],\n", + " [0, 0, 0, 5, 0, 3],\n", + " [0, 0, 0, 0, 0, 0]\n", + "]\n", + "\n", + "source_node = 0\n", + "target_node = 5\n", + "shortest_path = shortest_path(graph, source_node, target_node)\n", + "print(shortest_path) # Output: [0, 1, 2, 5]\n" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "SbQb-oaDP8rF", + "outputId": "0e65f72d-9ddc-40c7-c3aa-1b06f58bd378" + }, + "execution_count": 3, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "[0, 1, 2, 5]\n" + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/Submissions/002964524_SakhyathaYojani_Tammineni/Assignment2_002964524.ipynb b/Submissions/002964524_SakhyathaYojani_Tammineni/Assignment2_002964524.ipynb new file mode 100644 index 0000000..733d5cf --- /dev/null +++ b/Submissions/002964524_SakhyathaYojani_Tammineni/Assignment2_002964524.ipynb @@ -0,0 +1,532 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + } + }, + "cells": [ + { + "cell_type": "markdown", + "source": [ + "**1.Divide and Conquer Prime Count**\n", + "\n", + "Problem Statement:\n", + "\n", + "Given a recursive function that calculates the number of prime numbers within a given range [1, N], where N is a positive integer. The function works as follows:\n", + "\n", + "Task is to analyze this recursive function and determine the time complexity of counting prime numbers in the given range [1, N].\n", + "\n", + "Input Format:\n", + "* A positive integer N where 1 <= N <= 10^5.\n", + "\n", + "Output Format:\n", + "* An integer, the total count of prime numbers within the range [1, N].\n", + "\n", + "Sample input: N = 10\n", + "\n", + "Sample Output:4\n", + "\n", + "Constraints:\n", + "* You must analyze the time complexity of the provided recursive function and determine its order of growth.\n", + "\n", + "Solution and Justification:\n", + "The provided recursive function counts the number of prime numbers within a given range [1, N] using a recursive approach. It checks each number from N down to 1, and for each number, it iterates to check if it's prime.\n", + "The time complexity of the function can be analyzed as follows:\n", + "* In the worst case, it checks each number from N down to 1.\n", + "* For each number N, it iterates up to the square root of N to check if it's prime.\n", + "* Therefore, the time complexity of the function is O(N*sqrt(N)).\n", + "\n", + "Proof of correctness:\n", + "* The function correctly handles base cases and recursive calls.\n", + "* It checks if N is divisible by any odd numbers up to its square root to determine if it's prime.\n", + "* The count is updated accordingly.\n", + "* The correctness of the function is based on the well-known primality check.\n", + "\n", + "Reflection:\n", + "\n", + "Designing this problem was challenging because it required adapting the concept of recurrence relations and time complexity analysis into a practical problem related to prime numbers. The problem aims to test the understanding of time complexity analysis while involving a real-world scenario.\n", + "I learned that creating algorithmic problems requires a deep understanding of the underlying algorithms and their analysis. It's essential to present the problem in a clear and unambiguous manner while ensuring that the problem is non-trivial and requires a thoughtful approach to solve.\n", + "Working with ChatGPT was helpful in generating the problem statement and initial solution. It's a valuable tool for brainstorming and drafting problem descriptions.\n", + "\n" + ], + "metadata": { + "id": "z0w5Yd0jWjxu" + } + }, + { + "cell_type": "code", + "source": [ + "def count_primes(N):\n", + " if N <= 1:\n", + " return 0\n", + " if N == 2:\n", + " return 1\n", + " if N % 2 == 0:\n", + " return count_primes(N - 1)\n", + "\n", + " # Check if N is prime\n", + " is_prime = True\n", + " for i in range(3, int(N**0.5) + 1, 2):\n", + " if N % i == 0:\n", + " is_prime = False\n", + " break\n", + "\n", + " return count_primes(N - 1) + is_prime" + ], + "metadata": { + "id": "jzB_W_9AWiPt" + }, + "execution_count": 1, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + " 2.**Divide and Conquer Inversion Count**\n", + "\n", + "Problem Statement:\n", + "\n", + "Given an array of integers, and your task is to calculate the number of inversions in the array. An inversion occurs when two elements in the array are out of order, i.e., if i < j and arr[i] > arr[j]. Your goal is to implement an efficient divide and conquer algorithm to count the number of inversions in the array.\n", + "\n", + "Input Format:\n", + "\n", + "* An array of integers, arr, where 1 <= |arr| <= 10^5 (the length of the array).\n", + "* Each element in the array, arr[i], where 1 <= arr[i] <= 10^5.\n", + "\n", + "Output Format:\n", + "\n", + "* An integer, the total number of inversions in the array.\n", + "Sample Input:\n", + "arr = [2, 4, 1, 3, 5]\n", + "Sample Output: 3\n", + "\n", + "Constraints:\n", + "* You must implement an algorithm with a time complexity of O(n log n) or better.\n", + "\n", + "Solution and Justification:\n", + "\n", + "Here's a Python function mentioned below that solves the problem using a divide and conquer approach\n", + "\n", + "Proof of correctness:\n", + "\n", + "* The algorithm uses a modified merge sort to count inversions.\n", + "* It splits the array into two halves and recursively counts inversions in both halves.\n", + "* It also counts inversions that cross between the two halves during merging.\n", + "* The correctness of the algorithm is based on the fact that it explores all possible pairs of elements and correctly identifies inversions.\n", + "\n", + "Reflection:\n", + "\n", + "Designing this problem was challenging because it required adapting the concept of recurrence relations and divide and conquer algorithms into a problem related to counting inversions. The problem aims to test the understanding of algorithmic techniques for solving a real-world problem.\n", + "I learned that creating algorithmic problems involves a deep understanding of the underlying algorithms and their analysis. It's crucial to present the problem in a clear and unambiguous manner while ensuring that the problem is non-trivial and requires an efficient algorithm to solve.\n", + "Working with ChatGPT was helpful in generating the problem statement and initial solution. It's a valuable tool for brainstorming and drafting problem descriptions.\n", + "\n" + ], + "metadata": { + "id": "UZs5AM3eX47d" + } + }, + { + "cell_type": "code", + "source": [ + "def count_inversions(arr):\n", + " def merge_and_count(arr, left, mid, right):\n", + " inv_count = 0\n", + " n1 = mid - left + 1\n", + " n2 = right - mid\n", + "\n", + " L = arr[left:mid + 1]\n", + " R = arr[mid + 1:right + 1]\n", + "\n", + " i = j = 0\n", + " k = left\n", + "\n", + " while i < n1 and j < n2:\n", + " if L[i] <= R[j]:\n", + " arr[k] = L[i]\n", + " i += 1\n", + " else:\n", + " arr[k] = R[j]\n", + " j += 1\n", + " inv_count += n1 - i\n", + " k += 1\n", + "\n", + " while i < n1:\n", + " arr[k] = L[i]\n", + " i += 1\n", + " k += 1\n", + "\n", + " while j < n2:\n", + " arr[k] = R[j]\n", + " j += 1\n", + " k += 1\n", + "\n", + " return inv_count\n", + "\n", + " def merge_sort_and_count(arr, left, right):\n", + " inv_count = 0\n", + " if left < right:\n", + " mid = (left + right) // 2\n", + " inv_count += merge_sort_and_count(arr, left, mid)\n", + " inv_count += merge_sort_and_count(arr, mid + 1, right)\n", + " inv_count += merge_and_count(arr, left, mid, right)\n", + " return inv_count\n", + "\n", + " return merge_sort_and_count(arr, 0, len(arr) - 1)" + ], + "metadata": { + "id": "wMrzI5HOZ01v" + }, + "execution_count": 12, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "3.Problem Statement:\n", + "\n", + "Given a simple undirected graph represented as an adjacency list. Each node in the graph is labeled with a unique positive integer. The Task is to implement Depth First Search (DFS) to traverse the graph starting from a given node and all the nodes that are reachable from the starting node.\n", + "\n", + "Input:\n", + "\n", + "An adjacency list representing a graph where each key-value pair represents a node and its adjacent nodes. The graph is represented as a dictionary, where keys are node labels (integers), and values are lists of integers representing adjacent nodes.\n", + "A starting node label (an integer) from which the DFS traversal should begin.\n", + "\n", + "Output:\n", + "\n", + "A list of integers containing all the nodes that are reachable from the starting node using DFS traversal.\n", + "\n", + "Constraints:\n", + "\n", + "The input graph is a simple undirected graph.\n", + "The input graph has at most 10^4 nodes.\n", + "Nodes are labeled with unique positive integers.\n", + "The starting node label is guaranteed to be a valid node label in the graph.\n", + "\n", + "\n", + "Solution :\n", + "\n", + "def dfs(graph, start_node): visited = set() result = []\n", + "\n", + "def dfs_recursive(node):\n", + "\n", + " visited.add(node)\n", + " result.append(node)\n", + " for neighbor in graph.get(node, []):\n", + " if neighbor not in visited:\n", + " dfs_recursive(neighbor)\n", + "dfs_recursive(start_node)\n", + "\n", + "\n", + "\n", + "Justification:\n", + "\n", + "The dfs function takes the graph and a starting node label as input and performs a Depth First Search traversal starting from the given node. It uses a recursive helper function dfs_recursive to explore the graph and adds visited nodes to the result list. Finally, it returns the list of reachable nodes.\n", + "\n", + "Sample Input and Output:\n", + "\n", + "Input:\n", + "graph = { 1: [2, 3], 2: [1, 4, 5], 3: [1, 6], 4: [2], 5: [2], 6: [3] } start_node = 1\n", + "\n", + "Output:\n", + "reachable_nodes = dfs(graph, start_node) print(reachable_nodes)\n", + "\n", + "Output for the above graph would be: [1, 2, 4, 5, 3, 6]\n", + "\n", + "This solution provides a clear and efficient way to perform DFS traversal in a graph and all reachable nodes from a given starting node. It maintains a set of visited nodes to avoid revisiting nodes and uses recursion to explore the graph.\n", + "\n", + "Reflection:\n", + "\n", + "ChatGPT assisted in formulating this problem by providing a clear example of a DFS traversal problem and its solution. It helped by explaining the problem statement, input, output, and constraints, which served as a template for creating a similar problem.\n", + "design process is important. It helps identify challenges, validate the approach, and gain insights into how to improve future problem design." + ], + "metadata": { + "id": "_c9c9T2MaBMo" + } + }, + { + "cell_type": "markdown", + "source": [ + "**4.Recursive Factorial Calculation**\n", + "\n", + "Problem Statement:\n", + "\n", + "Given a Recursive Function that calculates the factorial of a positive integer N. The function works as follows:\n", + "\n", + "def factorial(N):\n", + " if N == 0:\n", + " return 1\n", + " return N * factorial(N - 1)\n", + "\n", + "Your task is to analyze the time complexity of the provided recursive function and determine the order of growth for calculating the factorial of N.\n", + "\n", + "Input Format:\n", + "\n", + "* A positive integer N where 1 <= N <= 10^5.\n", + "\n", + "Output Format:\n", + "\n", + "* An integer, the factorial of N.\n", + "\n", + "Sample Input:\n", + "N = 5\n", + "\n", + "Sample Output:120\n", + "\n", + "Constraints:\n", + "\n", + "* You must analyze the time complexity of the provided recursive function and determine its order of growth.\n", + "\n", + "Solution and Justification:\n", + "\n", + "The provided recursive function calculates the factorial of a positive integer N using a recursive approach. It multiplies N by the factorial of N-1 until N becomes 0.\n", + "The time complexity of the function can be analyzed as follows:\n", + "* In the worst case, it makes N recursive calls, each decrementing N by 1 until N reaches 0.\n", + "* Therefore, the time complexity of the function is O(N).\n", + "\n", + "\n", + "Proof of correctness:\n", + "\n", + "* The function correctly handles the base case where N is 0 and returns 1.\n", + "* For other values of N, it multiplies N by the factorial of N-1, which is the correct formula for calculating the factorial.\n", + "\n", + "Reflection:\n", + "\n", + "Designing this problem was interesting because it involved adapting the concept of recursive functions and time complexity analysis into a practical problem related to calculating factorials. The problem aims to test the understanding of time complexity analysis in the context of recursive algorithms.\n", + "I learned that creating algorithmic problems requires a good understanding of algorithmic concepts and their analysis. It's essential to present the problem in a clear and unambiguous manner while ensuring that the problem is non-trivial and requires a thoughtful approach to analyze and determine its time complexity.\n", + "Working with ChatGPT was helpful in generating the problem statement and initial solution. It's a valuable tool for brainstorming and drafting problem descriptions.\n", + "\n", + "\n" + ], + "metadata": { + "id": "1T_hyRk3igrt" + } + }, + { + "cell_type": "markdown", + "source": [ + "**5. Choose the correct answer** You have a large dataset of people's social connections, and you want to find the shortest path between two individuals in terms of mutual friends. Which algorithm is suitable for this task?\n", + "\n", + "a) Depth First Search\n", + "\n", + "b) Floyd-Warshall algorithm\n", + "\n", + "c) Breadth First Search\n", + "\n", + "d) Kruskal's algorithm\n", + "\n", + "Answer: c\n", + "\n", + "Explantion:\n", + "\n", + "Breadth First Search involves finding the shortest path between two individuals in terms of mutual friends in a large dataset as it explores connections at each level, hence identifying the shortest path based on the common connections.\n" + ], + "metadata": { + "id": "q47pPHI_wSIt" + } + }, + { + "cell_type": "markdown", + "source": [ + "**6. Choose the correct answer** \n", + " \n", + " You have a dataset of cities and the distances between them, and you want to find the shortest path between two cities for a delivery route. Which algorithm is suitable for this task?\n", + "\n", + "a) Dijkstra's algorithm\n", + "\n", + "b) Merge Sort\n", + "\n", + "c) Quick Sort\n", + "\n", + "d) A* algorithm\n", + "\n", + "Answer: a\n", + "\n", + "Explanation:\n", + "\n", + "Dijkstra's algorithm is the suitable choice for finding the shortest path between two cities in a dataset of cities and distances for a delivery route. Here's why:\n", + "\n", + "Weighted Graphs: Dijkstra's algorithm is specifically designed for finding the shortest path in weighted graphs, where edges have associated weights (in this case, distances between cities).\n", + "\n", + "Optimizing Delivery Routes: When you're planning a delivery route between two cities, you need to consider the shortest path in terms of distance. Dijkstra's algorithm calculates the shortest path efficiently in such scenarios.\n", + "\n", + "Guaranteed Shortest Path: Dijkstra's algorithm guarantees finding the shortest path in weighted graphs without negative edge weights, making it a reliable choice for finding optimal delivery routes.\n", + "\n", + "Notable Alternative: While A* algorithm is also a viable option for finding shortest paths in weighted graphs, it typically requires a heuristic function and is more commonly used in scenarios where you have additional information or constraints, such as real-time traffic data. Dijkstra's algorithm is a more straightforward choice for finding the shortest path based solely on distance.\n", + "\n", + "In summary, when you need to find the shortest path between two cities for a delivery route considering the distances between them, Dijkstra's algorithm is the suitable choice due to its efficiency and reliability in weighted graphs." + ], + "metadata": { + "id": "48ozKEeuxfjU" + } + }, + { + "cell_type": "markdown", + "source": [ + "**7.Question:**\n", + "\n", + "You need to find the minimum spanning tree in a weighted undirected graph to minimize the cost of connecting all nodes while avoiding cycles. Which algorithm is suitable for this task?\n", + "\n", + "a) Depth First Search\n", + "\n", + "b) Floyd-Warshall algorithm\n", + "\n", + "c) Breadth First Search\n", + "\n", + "d) Kruskal's algorithm\n", + "\n", + "Answer: d\n", + "\n", + "Explanation:\n", + "\n", + "Kruskal's algorithm is the appropriate choice for finding the minimum spanning tree in a weighted undirected graph. Here's why Kruskal's algorithm is suitable for this task:\n", + "\n", + "Minimizing Cost: Kruskal's algorithm is specifically designed to find the minimum spanning tree, which is a tree that spans all nodes in the graph with the minimum possible total edge weight. This ensures that you connect all nodes while minimizing the cost.\n", + "\n", + "Weighted Graphs: Kruskal's algorithm is effective for graphs with weighted edges, where each edge has a numerical weight or cost associated with it. It sorts the edges by weight and adds them to the minimum spanning tree in ascending order of weight.\n", + "\n", + "Cycle Avoidance: Kruskal's algorithm ensures that no cycles are formed in the spanning tree by selectively adding edges with the lowest weights, which prevents the creation of loops in the tree.\n", + "\n", + "Efficiency: Kruskal's algorithm is relatively efficient and has a time complexity of O(E log E), where E is the number of edges in the graph. This makes it suitable for large graphs.\n", + "\n", + "In summary, Kruskal's algorithm is the most appropriate choice when the goal is to find the minimum spanning tree in a weighted undirected graph to minimize the cost of connecting all nodes while avoiding cycles." + ], + "metadata": { + "id": "TVpV6cj5yTsh" + } + }, + { + "cell_type": "markdown", + "source": [ + "**8.Question:**\n", + "\n", + " When analyzing the time complexity of a divide-and-conquer algorithm using the Master Theorem, you encounter a recurrence relation of the form T(n) = a * T(n/b) + f(n), where a, b, and f(n) are constants. Which case of the Master Theorem applies when the recurrence relation can be expressed as T(n) = 4 * T(n/2) + n^2 * log n?\n", + "\n", + "Answer: The recurrence relation T(n) = 4 * T(n/2) + n^2 * log n falls into Case 2 of the Master Theorem.\n", + "\n", + "Explanation:\n", + "\n", + "In Case 2 of the Master Theorem, the recurrence relation is of the form T(n) = a * T(n/b) + f(n), where:\n", + "\n", + "a = 4 (the number of subproblems)\n", + "b = 2 (the factor by which the input size is divided in each subproblem)\n", + "f(n) = n^2 * log n (the \"combine\" step's time complexity)\n", + "\n", + "To determine the time complexity using Case 2, you compare the function f(n) to n^(log_b(a)). In this case, n^(log_2(4)) = n^2.\n", + "\n", + "If f(n) is asymptotically smaller than n^(log_b(a)), which means f(n) = O(n^(log_b(a - ε))) for some ε > 0, then the time complexity is T(n) = Θ(n^(log_b(a))).\n", + "In the given recurrence, f(n) = n^2 * log n, and n^(log_2(4)) = n^2. Since log n is polynomially smaller than n^2, we have f(n) = O(n^(2 - ε)) for some ε > 0. Therefore, Case 2 of the Master Theorem applies, and the time complexity of the algorithm is T(n) = Θ(n^2).\n", + "\n", + "So, for the recurrence T(n) = 4 * T(n/2) + n^2 * log n, the Master Theorem (Case 2) tells us that the time complexity is Θ(n^2)." + ], + "metadata": { + "id": "GE_E3V_N0M9_" + } + }, + { + "cell_type": "markdown", + "source": [ + "**9.Question**\n", + "\n", + "In computer science, when analyzing the time complexity of a divide-and-conquer algorithm, the Master Theorem is a valuable tool. Can you explain when and how the Master Theorem can be applied to determine the time complexity of such algorithms?\n", + "\n", + "Answer:\n", + "\n", + "The Master Theorem is a powerful tool for analyzing the time complexity of divide-and-conquer algorithms with a specific structure. It can be applied when you have a recurrence relation of the form:\n", + "\n", + "T(n) = aT(n/b) + f(n)\n", + "\n", + "Where:\n", + "\n", + "T(n) is the time complexity of the algorithm for a problem of size n.\n", + "a is the number of subproblems generated in the divide step.\n", + "n/b is the size of each subproblem.\n", + "f(n) represents the time complexity for dividing, combining, and any work done outside of the recursive calls.\n", + "To use the Master Theorem, follow these steps:\n", + "\n", + "Identify a, b, and f(n) from the recurrence relation.\n", + "\n", + "Compute the value of log_b(a).\n", + "\n", + "Compare f(n) to n^log_b(a):\n", + "\n", + "a) If f(n) is O(n^log_b(a)), then the time complexity is T(n) = Theta(n^log_b(a)).\n", + "\n", + "b) If f(n) is Theta(n^log_b(a)), then the time complexity is T(n) = Theta(n^log_b(a) * log^k(n)) for some k >= 0.\n", + "\n", + "c) If f(n) is Omega(n^log_b(a)), and if a * f(n/b) <= c * f(n) for some c < 1 and sufficiently large n, then the time complexity is T(n) = Theta(f(n)).\n", + "\n", + "The Master Theorem simplifies the analysis of divide-and-conquer algorithms and provides a concise way to determine their time complexity without solving the recurrence relation through recursion trees or the substitution method. It's particularly useful in cases where the recurrence relation follows the specified form." + ], + "metadata": { + "id": "JGtH4tDL0pgG" + } + }, + { + "cell_type": "markdown", + "source": [ + "**10.question:**\n", + "\n", + "The Bellman-Ford algorithm is a widely-used algorithm for finding the shortest path in a weighted graph, even in the presence of negative-weight edges. However, it has a limitation. Can you explain what this limitation is and provide an example of a scenario where the limitation becomes evident?\n", + "\n", + "Answer:\n", + "\n", + "The limitation of the Bellman-Ford algorithm is that it cannot handle graphs with negative weight cycles. A negative weight cycle is a cycle in the graph where the sum of the weights of the edges in the cycle is negative. When such cycles exist, the Bellman-Ford algorithm cannot find a reliable shortest path because it can be trapped in an infinite loop, continuously reducing the path length by going around the negative weight cycle.\n", + "\n", + "Example:\n", + "\n", + "Consider the following directed graph mentioned below:\n", + "\n", + "\n", + "In this graph, there is a negative weight cycle formed by the edges B -> C -> E -> D -> B, with a total weight of (-3) + 1 + (-2) + 2 = -2. The Bellman-Ford algorithm will not work correctly in this scenario.\n", + "\n", + "If you try to find the shortest path from A to any other node using Bellman-Ford, it will incorrectly report that there is no shortest path or provide incorrect results due to the presence of the negative weight cycle.\n", + "\n", + "To handle graphs with negative weight cycles, other algorithms like the Floyd-Warshall algorithm or specialized techniques are required. Bellman-Ford is a useful algorithm, but it's important to be aware of its limitation when dealing with graphs that may have negative weight cycles." + ], + "metadata": { + "id": "LBAdmuiD1dhx" + } + }, + { + "cell_type": "code", + "source": [ + "A --(1)--> B --(-3)--> C\n", + " ^ |\n", + " | |\n", + "(2) (1)\n", + " | |\n", + " v v\n", + " D --(-2)--> E\n" + ], + "metadata": { + "id": "sCjBorvB8D-e" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "References:\n", + "1.https://www.javatpoint.com/dijkstras-algorithm#:~:text=Dijkstra's%20Algorithm%20is%20a%20Graph,Weighted%20Graphs%20having%20positive%20weights.\n", + "2.https://www.geeksforgeeks.org/master-theorem-subtract-conquer-recurrences/\n", + "3.https://www.javatpoint.com/dijkstras-algorithm#:~:text=Dijkstra's%20Algorithm%20is%20a%20Graph,Weighted%20Graphs%20having%20positive%20weights.\n", + "4.https://chat.openai.com/auth/login" + ], + "metadata": { + "id": "9lOZq8RM8dHn" + }, + "execution_count": null, + "outputs": [] + } + ] +} \ No newline at end of file diff --git a/Submissions/002964524_SakhyathaYojani_Tammineni/Assignment3_002964524_10_22_23.ipynb b/Submissions/002964524_SakhyathaYojani_Tammineni/Assignment3_002964524_10_22_23.ipynb new file mode 100644 index 0000000..6f61275 --- /dev/null +++ b/Submissions/002964524_SakhyathaYojani_Tammineni/Assignment3_002964524_10_22_23.ipynb @@ -0,0 +1,504 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + } + }, + "cells": [ + { + "cell_type": "markdown", + "source": [ + "**1.**\n", + "\n", + "Given the items below, determine which combination of items will yield the highest value without exceeding a total weight of 7. Use dynamic programming.\n", + "\n", + "| Item | Value Weight |. \n", + "|------|-------|--------|\n", + "| 1 | 3 | 2 |\n", + "| 2 | 4 | 3 |\n", + "| 3 | 5 | 4 |\n", + "| 4 | 6 | 5 |\n", + "| 5 | 7 | 6 |\n", + "\n", + "**Solution:**\n", + "Items 1 and 2 are selected for a total value of 7.\n" + ], + "metadata": { + "id": "2W7JETbznL-1" + } + }, + { + "cell_type": "markdown", + "source": [ + " **2.**\n", + "Given a graph G = (V, E) where each edge has a weight, find a minimum spanning tree using Kruskal's algorithm.\n", + "\n", + "**Solution:**\n", + "This solution depends on the specific graph given. Generally, sort the edges in increasing order and pick the smallest edge that doesn't form a cycle.\n", + "\n" + ], + "metadata": { + "id": "wBW5uMLfnjtc" + } + }, + { + "cell_type": "markdown", + "source": [ + "**3.**\n", + "Describe the differences between Prim's and Kruskal's algorithm for finding a minimum spanning tree.\n", + "\n", + "**Solution:**\n", + "Prim's algorithm starts with a node and selects the smallest edge from the tree to a non-tree vertex, while Kruskal's algorithm sorts all edges and picks the smallest edge that doesn't cause a cycle.\n", + "\n" + ], + "metadata": { + "id": "oVj7A3mrnoRB" + } + }, + { + "cell_type": "markdown", + "source": [ + "**4.**\n", + "How would you determine if a directed graph contains a cycle?\n", + "\n", + "**Solution:**\n", + "One way is to use depth-first search. If, during the traversal, we come across a vertex that is already in the recursion stack, then there is a cycle.\n", + "\n" + ], + "metadata": { + "id": "R7Ue7B-rnv9b" + } + }, + { + "cell_type": "markdown", + "source": [ + "**5.**\n", + "Explain the concept of a maximum flow in a flow network and its importance.\n", + "\n", + "**Solution:**\n", + "The maximum flow is the greatest rate at which material or information can be moved from the source to the sink without violating any capacity constraints. It's important as it determines the optimal usage of a network.\n", + "\n" + ], + "metadata": { + "id": "TBITj4rKn1gP" + } + }, + { + "cell_type": "markdown", + "source": [ + "**6.**\n", + "Using Dijkstra's algorithm, find the shortest path from node A to node D in the given weighted graph.\n", + "\n", + "**Solution:**\n", + "The specific solution depends on the graph. Generally, Dijkstra's algorithm would involve exploring paths starting from node A and choosing the path with the smallest weight at each step.\n", + "\n" + ], + "metadata": { + "id": "KFSD6N3poPy0" + } + }, + { + "cell_type": "markdown", + "source": [ + "**7.**\n", + "Explain the greedy property and its significance in the context of algorithms.\n", + "\n", + "**Solution:**\n", + "The greedy property suggests choosing the locally optimal solution at each step with the hope of finding the global optimum. It's significant because it can simplify problem-solving and improve efficiency for certain problems." + ], + "metadata": { + "id": "Psfrlpq0odLE" + } + }, + { + "cell_type": "markdown", + "source": [ + "**8.**\n", + "Describe the differences between dynamic programming and greedy algorithms.\n", + "\n", + "**Solution:**\n", + "Dynamic programming breaks problems into smaller subproblems and solves each one just once, storing the solutions to subproblems to avoid redundant work. Greedy algorithms make a series of choices by picking the best available option at each step without considering global data.\n", + "\n" + ], + "metadata": { + "id": "QUyDaRZOojAw" + } + }, + { + "cell_type": "markdown", + "source": [ + "**9.**\n", + "Explain the significance of the Bellman-Ford algorithm and where it can be applied.\n", + "\n", + "**Solution:**\n", + "Bellman-Ford is used for finding the shortest path from a single source vertex to all other vertices in a weighted graph. It's significant because, unlike Dijkstra's, it can work with graphs with negative weight edges.\n", + "\n" + ], + "metadata": { + "id": "ayVyiarJoqR1" + } + }, + { + "cell_type": "markdown", + "source": [ + "**10.Problem Title**: Optimal Movie Selection\n", + "\n", + "**Problem Statement**:\n", + "You're organizing a movie marathon night where you have a limited time to show as many movies as possible. Given the lengths of movies and their ratings, choose movies that will maximize the total rating without exceeding the time limit.\n", + "\n", + "**Input Format**:\n", + "* An integer T representing the total time available (in minutes).\n", + "* An integer N representing the number of movies.\n", + "* N lines each containing two integers:\n", + " * the length of the movie (in minutes)\n", + " * the rating of the movie (out of 10)\n", + "\n", + "**Output Format**:\n", + "* An integer representing the maximum total rating that can be achieved.\n", + "\n", + "**Sample Input**:\n", + "```\n", + "180\n", + "3\n", + "120 8\n", + "60 6\n", + "90 9\n", + "```\n", + "\n", + "**Sample Output**:\n", + "```\n", + "17\n", + "```\n", + "\n", + "**Constraints**:\n", + "* 1 ≤ T ≤ 1000\n", + "* 1 ≤ N ≤ 100\n", + "* 1 ≤ movie length ≤ 200\n", + "* 1 ≤ movie rating ≤ 10\n", + "\n", + "**Solution and Justification**:\n", + "Use a dynamic programming approach similar to the 0-1 Knapsack problem. For every movie, decide whether to include it in the marathon or not based on maximizing the rating.\n", + "\n", + "**Proof of Correctness**:\n", + "This solution uses dynamic programming to build up a solution incrementally. At each stage, it decides whether including a movie would maximize the total rating without exceeding the time limit.\n", + "\n", + "**Reflection**:\n", + "* **How ChatGPT assisted**: ChatGPT helped by providing the essence of the knapsack problem and guiding towards formulating a new problem.\n", + "* **Challenges faced**: The challenge was to keep the essence of the knapsack problem while making it original and not a mere replication.\n", + "* **Learnings**: Designing algorithmic problems requires a deep understanding of the underlying concept and creative thinking to transform it into a new, yet similar, problem.\n", + "\n" + ], + "metadata": { + "id": "KgmnTUNSo5Dr" + } + }, + { + "cell_type": "code", + "source": [ + "def maxRating(T, movies):\n", + " n = len(movies)\n", + " dp = [[0 for w in range(T + 1)] for i in range(n + 1)]\n", + "\n", + " for i in range(n + 1):\n", + " for w in range(T + 1):\n", + " if i == 0 or w == 0:\n", + " dp[i][w] = 0\n", + " elif movies[i-1][0] <= w:\n", + " dp[i][w] = max(movies[i-1][1] + dp[i-1][w - movies[i-1][0]], dp[i-1][w])\n", + " else:\n", + " dp[i][w] = dp[i-1][w]\n", + "\n", + " return dp[n][T]" + ], + "metadata": { + "id": "ZxDLnz1dpo1X" + }, + "execution_count": 1, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**11.Problem Title**: Optimal Library Route\n", + "\n", + "**Problem Statement**:\n", + "A librarian wishes to place books on shelves across various aisles. Each aisle has a score based on the importance of the books. The librarian wants to choose aisles such that they maximize the score, but they must return to their starting aisle and can't visit an aisle more than once. Find the optimal route.\n", + "\n", + "**Input Format**:\n", + "* An integer N representing the number of aisles.\n", + "* A matrix of size N x N representing the distance between aisles.\n", + "* A list of N integers representing the score of each aisle.\n", + "\n", + "**Output Format**:\n", + "* An integer representing the maximum score achievable.\n", + "\n", + "**Sample Input**:\n", + "```\n", + "3\n", + "[[0, 2, 9],\n", + " [2, 0, 6],\n", + " [9, 6, 0]]\n", + "[5, 10, 6]\n", + "```\n", + "\n", + "**Sample Output**:\n", + "```\n", + "21\n", + "```\n", + "\n", + "**Constraints**:\n", + "* 2 ≤ N ≤ 10\n", + "* Distance values are between 1 and 1000, with the diagonal being 0.\n", + "* Score values are between 1 and 100.\n", + "\n", + "** Solution and Justification**:\n", + "This problem can be tackled using a recursive approach, where at each step, we choose the next aisle based on maximizing the score while keeping track of the total distance covered.\n", + "\n", + "```python\n", + "def optimalRoute(matrix, scores, current, visited):\n", + " if len(visited) == len(matrix):\n", + " return scores[current]\n", + "\n", + " maxScore = 0\n", + " for i in range(len(matrix)):\n", + " if i not in visited and matrix[current][i] != 0:\n", + " score = scores[current] + optimalRoute(matrix, scores, i, visited + [i])\n", + " maxScore = max(maxScore, score)\n", + "\n", + " return maxScore\n", + "\n", + "result = 0\n", + "for i in range(len(matrix)):\n", + " result = max(result, optimalRoute(matrix, scores, i, [i]))\n", + "```\n", + "\n", + "**Proof of Correctness**:\n", + "This solution uses recursion to explore all possible routes and chooses the optimal one based on the scores of aisles visited. The librarian's route is optimized to maximize the score of aisles visited.\n", + "\n", + "**Reflection**:\n", + "* **How ChatGPT assisted**: ChatGPT offered insight into the underlying concept of TSP and helped translate it into a new problem context.\n", + "* **Challenges faced**: Ensuring that the new problem retains the essence of TSP while making it feel fresh and unique.\n", + "* **Learnings**: Creating problems from classic algorithms helps in understanding the core concept and teaches how different real-world scenarios can be mapped to algorithmic problems.\n" + ], + "metadata": { + "id": "tI8BupF8qFZL" + } + }, + { + "cell_type": "markdown", + "source": [ + "**12.Problem Title: Balanced Bookshelf**\n", + "\n", + "**Problem Statement:**\n", + "A library has a unique way of arranging books on a bookshelf. Each book has a unique identifier number. The left shelf contains books with identifiers less than the current book, while the right shelf contains books with identifiers greater than the current book. However, to ensure the shelf remains stable, the difference in heights between the left and right shelves for any book should not exceed 1. Determine if the given book arrangement is stable.\n", + "\n", + "**Input Format:**\n", + "\n", + "\t•\tA list of integers representing the arrangement of books.\n", + "\n", + "**Output Format:**\n", + "\n", + "\t•\t“YES” if the arrangement is stable, otherwise “NO”.\n", + "\n", + "**Sample Input:**\n", + "\n", + "[4, 2, 6, 1, 3, 5, 7]\n", + "\n", + "**Sample Output:**\n", + "\n", + "YES\n", + "\n", + "**Constraints:**\n", + "\n", + "\t•\tThe list of integers will have at least 1 and at most 10^4 integers.\n", + "\t•\tEach integer is unique and between 1 to 10^6.\n", + "\n", + "**Solution and Justification:**\n", + "The problem can be tackled by constructing the BST and then checking if it’s height-balanced.\n", + "\n", + "```python\n", + "class TreeNode:\n", + " def __init__(self, val):\n", + " self.val = val\n", + " self.left = None\n", + " self.right = None\n", + "\n", + "def insert(root, val):\n", + " if not root:\n", + " return TreeNode(val)\n", + " if val < root.val:\n", + " root.left = insert(root.left, val)\n", + " else:\n", + " root.right = insert(root.right, val)\n", + " return root\n", + "\n", + "def isBalanced(root):\n", + " if not root:\n", + " return True, 0\n", + " leftBalanced, leftHeight = isBalanced(root.left)\n", + " rightBalanced, rightHeight = isBalanced(root.right)\n", + " if abs(leftHeight - rightHeight) <= 1 and leftBalanced and rightBalanced:\n", + " return True, 1 + max(leftHeight, rightHeight)\n", + " return False, 1 + max(leftHeight, rightHeight)\n", + "\n", + "root = None\n", + "for val in input_list:\n", + " root = insert(root, val)\n", + "balanced, _ = isBalanced(root)\n", + "print(\"YES\" if balanced else \"NO\")\n", + "```\n", + "\n", + "**Proof of Correctness:**\n", + "The algorithm constructs the BST from the list and then checks each node to see if the height difference between its left and right child is at most 1.\n", + "\n", + " **Reflection:**\n", + "\n", + "\tHow ChatGPT assisted: ChatGPT provided a clear understanding of BSTs and guided the creation of a real-world scenario based on it.\n", + "\tChallenges faced: Designing a problem that retains the essence of BSTs but in a library context.\n", + "\tLearnings: Translating theoretical concepts into real-world scenarios is challenging but enlightening, revealing the pervasive nature of algorithmic logic in everyday life." + ], + "metadata": { + "id": "vwqwzo-GsoLB" + } + }, + { + "cell_type": "markdown", + "source": [ + "**13.Problem Title: Social Network Influence**\n", + "\n", + "**Problem Statement:**\n", + "In a small city, everyone is connected through a social network. Each person can influence their direct friends and spread a rumor to them. However, not everyone has an equal influence level. Some people have a higher influence, meaning when they spread a rumor, it’s more likely their friends will believe and spread it further. Determine the minimum number of individuals needed to spread a rumor so that it reaches the entire network.\n", + "\n", + "**Input Format:**\n", + "\n", + "\t•\tN - number of people in the network (1 <= N <= 10^4)\n", + "\t•\tM - number of direct connections/friendships (1 <= M <= 10^5)\n", + "\t•\tM lines, each containing two integers, representing a friendship between two people.\n", + "\t•\tList of N integers, representing the influence level of each person.\n", + "\n", + "**Output Format:**\n", + "\n", + "\t•\tAn integer - the minimum number of individuals needed to spread a rumor to the entire network.\n", + "\n", + "**Sample Input:**\n", + "\n", + "5\n", + "4\n", + "1 2\n", + "2 3\n", + "3 4\n", + "4 5\n", + "3 2 1 2 3\n", + "\n", + "**Sample Output:**\n", + "\n", + "2\n", + "\n", + "**Constraints:**\n", + "\n", + "\t•\tFriendship connections are undirected.\n", + "\t•\tInfluence level is between 1 (lowest) and 10 (highest).\n", + "\n", + " **Solution and Justification:**\n", + "An effective approach would be to use a greedy algorithm. Start by spreading the rumor from the person with the highest influence level. Then perform a BFS traversal to mark all people who are influenced. Repeat the process until all people in the network are influenced.\n", + "\n", + "**Pseudocode:**\n", + "```\n", + "sort people by influence level in descending order\n", + "set count = 0\n", + "for each person in sorted list:\n", + " if person is not influenced:\n", + " perform BFS from person to mark all influenced\n", + " increase count by 1\n", + "return count\n", + "```\n", + "\n", + "\n", + "**Proof of Correctness:**\n", + "By targeting the most influential people first, the rumor spreads more quickly and efficiently, ensuring a minimal number of individuals are needed.\n", + "\n", + " **Reflection:**\n", + "\n", + "\t•\tHow ChatGPT assisted: ChatGPT offered insights into-how the core graph theory concepts could be used in a real-world context, especially in the domain of social networks.\n", + "\n", + "\t•\tChallenges faced: Translating graph algorithms to a context like social network influence without making it a straightforward traversal problem was a challenge.\n", + "\t•\tLearnings: By adapting core algorithmic concepts to unique scenarios, it’s possible to test understanding beyond just the raw mechanics of the algorithm, but also its implications in different contexts.\n", + "\n" + ], + "metadata": { + "id": "mjhtBEho06ad" + } + }, + { + "cell_type": "markdown", + "source": [ + "**14.Problem Title**: City Water Distribution\n", + "\n", + "**Problem Statement**:\n", + "A city has designed a new water distribution system. The system consists of reservoirs (nodes) connected with pipelines (edges). Each pipeline has a capacity indicating the maximum amount of water it can transport per hour. The city wants to ensure the maximum amount of water is transferred from the main reservoir to the city center. Using the Ford-Fulkerson algorithm, determine the maximum water flow from the main reservoir to the city center.\n", + "\n", + "**Input Format**:\n", + "* N - number of reservoirs in the city (2 <= N <= 500).\n", + "* M - number of pipelines connecting the reservoirs (1 <= M <= 10^4).\n", + "* M lines, each containing three integers: two reservoir numbers (indicating the start and end reservoir) and the capacity of the pipeline.\n", + "* Two integers representing the main reservoir and the city center reservoir.\n", + "\n", + "**Output Format**:\n", + "* An integer - the maximum amount of water (in liters per hour) that can be transported from the main reservoir to the city center.\n", + "\n", + "**Sample Input**:\n", + "```\n", + "5\n", + "6\n", + "1 2 20\n", + "2 3 30\n", + "2 4 10\n", + "3 4 10\n", + "3 5 40\n", + "4 5 20\n", + "1 5\n", + "```\n", + "\n", + "**Sample Output**:\n", + "```\n", + "50\n", + "```\n", + "\n", + "**Constraints**:\n", + "* Capacity of each pipeline is between 1 and 10^6.\n", + "\n", + "**Solution and Justification**:\n", + "The solution involves implementing the Ford-Fulkerson algorithm and running it on the provided flow network.\n", + "\n", + "**Pseudocode**:\n", + "```pseudocode\n", + "initialize flow to 0\n", + "while there exists an augmenting path p in the residual graph:\n", + " augment flow using path p\n", + "return flow\n", + "```\n", + "\n", + "**Proof of Correctness**:\n", + "The Ford-Fulkerson algorithm ensures that we find the maximum flow in the network by iteratively finding augmenting paths and updating the flow.\n", + "\n", + "**Reflection**:\n", + "* **How ChatGPT assisted**: ChatGPT helped provide a real-world context (water distribution) to the classic max-flow problem, allowing for a more tangible understanding.\n", + "* **Challenges faced**: Balancing the real-world context with the complexities of the Ford-Fulkerson algorithm to ensure the problem remains meaningful.\n", + "* **Learnings**: Representing algorithmic problems in a real-world scenario can make them more engaging and relatable while testing the same core concepts.\n", + "\n" + ], + "metadata": { + "id": "3v0oyZJL2Kwt" + } + } + ] +} \ No newline at end of file diff --git a/Submissions/002964524_SakhyathaYojani_Tammineni/Assigntment_4_sakhyatha_002964524.ipynb b/Submissions/002964524_SakhyathaYojani_Tammineni/Assigntment_4_sakhyatha_002964524.ipynb new file mode 100644 index 0000000..e17512c --- /dev/null +++ b/Submissions/002964524_SakhyathaYojani_Tammineni/Assigntment_4_sakhyatha_002964524.ipynb @@ -0,0 +1,624 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + } + }, + "cells": [ + { + "cell_type": "markdown", + "source": [ + "\n", + "\n", + "**1.Problem Statement:**\n", + "\n", + "Given an undirected graph G = (V, E), define a \"Colorful Cycle Cover\" as a set of cycles where each cycle is assigned a unique color, and every vertex in G belongs to exactly one cycle. Design an algorithm to determine if a Colorful Cycle Cover exists for a given graph.\n", + "\n", + "**Input Format:**\n", + "\n", + "The graph G represented by its vertices V and edges E.\n", + "The number of vertices |V| (1 <= |V| <= 1000).\n", + "The edges E represented as pairs of vertices.\n", + "\n", + "**Output Format:**\n", + "\n", + "Return True\n", + "if a Colorful Cycle Cover exists;\n", + "otherwise,\n", + "return False.\n", + "\n", + "**Sample Inputs and Outputs:**\n", + "\n", + "Input:\n", + "\n", + "Vertices: [1, 2, 3, 4, 5]\n", + "\n", + "Edges: [(1, 2), (2, 3), (3, 4), (4, 5), (5, 1)]\n", + "\n", + "Output:\n", + "\n", + "True\n", + "\n", + "**Constraints:**\n", + "\n", + "The graph is undirected.\n", + "\n", + "**Solution and Justification:**\n", + "```\n", + "def colorful_cycle_cover(graph):\n", + " def dfs(node, color, visited, colors, parent=None):\n", + " visited[node] = True\n", + " colors[node] = color\n", + "\n", + " for neighbor in graph[node]:\n", + " if not visited[neighbor]:\n", + " dfs(neighbor, 1 - color, visited, colors, node)\n", + " elif neighbor != parent and colors[neighbor] == color:\n", + " return False # Two adjacent nodes in the same cycle have the same color\n", + "\n", + " return True\n", + "\n", + " def is_colorful_cycle_cover():\n", + " num_nodes = len(graph)\n", + " visited = [False] * num_nodes\n", + " colors = [-1] * num_nodes\n", + "\n", + " for node in range(num_nodes):\n", + " if not visited[node]:\n", + " if not dfs(node, 0, visited, colors):\n", + " return False # Detected a conflict in colors within a cycle\n", + "\n", + " return True\n", + "\n", + " return is_colorful_cycle_cover()\n", + "\n", + "```\n", + "\n", + "**Example Usage**\n", + "\n", + "vertices = [1, 2, 3, 4, 5]\n", + "\n", + "edges = [(1, 2), (2, 3), (3, 4), (4, 5), (5, 1)]\n", + "\n", + "graph = {v: [] for v in vertices}\n", + "\n", + "```\n", + "for edge in edges:\n", + " graph[edge[0]].append(edge[1])\n", + " graph[edge[1]].append(edge[0])\n", + "\n", + "result = colorful_cycle_cover(graph)\n", + "print(result)\n", + "\n", + "```\n", + "\n", + "**Proof of Correctness:**\n", + "\n", + "A Colorful Cycle Cover is valid if and only if each vertex is part of exactly one cycle, and cycles are assigned unique colors.\n", + "\n", + "**Reflection:**\n", + "\n", + "ChatGPT assisted in understanding the essence of the sample problem by providing insights into graph theory and cycle cover concepts. The challenge was maintaining non-triviality while ensuring the problem aligns with the core ideas of the example. I learned the importance of incorporating unique constraints to make a problem more challenging and meaningful. The process highlighted the intricate balance between clarity, originality, and algorithmic relevance in problem design." + ], + "metadata": { + "id": "lXbZiN-M7N3P" + } + }, + { + "cell_type": "markdown", + "source": [ + "**2.Problem Statement**:\n", + "The Weighted Directed Disjoint Paths Problem is defined as follows. Given a directed graph G with edge weights and k pairs of nodes (s1, t1), (s2, t2), ..., (sk, tk), the objective is to determine whether there exist k node-disjoint paths P1, P2, ..., Pk such that the sum of weights along each path Pi is minimized. Design an algorithm to solve this problem.\n", + "\n", + "**Input Format**:\n", + "\n", + "A directed graph G represented by its vertices V, edges E, and weights on edges.\n", + "k pairs of nodes (si, ti) representing the start and end nodes of each path.\n", + "\n", + "**Output Format:**\n", + "\n", + "Return True\n", + "if there exist k node-disjoint paths with minimized total weight; otherwise, return False.\n", + "\n", + "**Sample Inputs and Outputs:**\n", + "\n", + "```\n", + "Input:\n", + "Vertices: [1, 2, 3, 4, 5]\n", + "Edges: [(1, 2, 3), (2, 3, 1), (3, 4, 2), (4, 5, 4), (5, 1, 5)]\n", + "Pairs: [(1, 3), (2, 4), (3, 5)]\n", + "\n", + "Output:\n", + "True\n", + "```\n", + "\n", + "**Constraints:**\n", + "\n", + "The graph is directed and weighted.\n", + "Weights on edges are positive integers.\n", + "\n", + "**Solution and Justification:**\n", + "\n", + "```\n", + "from collections import defaultdict\n", + "import heapq\n", + "\n", + "def weighted_directed_disjoint_paths(graph, pairs):\n", + " def dijkstra(graph, start, end):\n", + " pq = [(0, start, [])] # Priority queue to store (total_weight, current_node, path)\n", + " visited = set()\n", + "\n", + " while pq:\n", + " (total_weight, current_node, path) = heapq.heappop(pq)\n", + "\n", + " if current_node not in visited:\n", + " visited.add(current_node)\n", + " path = path + [current_node]\n", + "\n", + " if current_node == end:\n", + " return total_weight, path\n", + "\n", + " for neighbor, weight in graph[current_node]:\n", + " heapq.heappush(pq, (total_weight + weight, neighbor, path))\n", + "\n", + " return float('inf'), []\n", + "\n", + " def find_disjoint_paths():\n", + " paths = []\n", + "\n", + " for start, end in pairs:\n", + " weight, path = dijkstra(graph, start, end)\n", + "\n", + " if weight == float('inf'):\n", + " return False # No path found for a pair, so overall solution is not possible\n", + "\n", + " paths.append(path)\n", + "\n", + " # Check if paths are node-disjoint\n", + " all_nodes = set()\n", + " for path in paths:\n", + " if set(path) & all_nodes:\n", + " return False # Paths are not node-disjoint\n", + " all_nodes.update(path)\n", + "\n", + " return True\n", + "\n", + " return find_disjoint_paths()\n", + "\n", + "# Example Usage\n", + "vertices = [1, 2, 3, 4, 5]\n", + "edges = [(1, 2, 3), (2, 3, 1), (3, 4, 2), (4, 5, 4), (5, 1, 5)]\n", + "graph = defaultdict(list)\n", + "\n", + "for edge in edges:\n", + " graph[edge[0]].append((edge[1], edge[2]))\n", + "\n", + "pairs = [(1, 3), (2, 4), (3, 5)]\n", + "\n", + "result = weighted_directed_disjoint_paths(graph, pairs)\n", + "print(result)\n", + "```\n", + "\n", + "**Proof of Correctness:**\n", + "\n", + "The algorithm can be proven correct by adapting the logic from the k disjoint shortest paths problem, considering edge weights and ensuring the paths are node-disjoint.\n", + "\n", + "**Reflection:**\n", + "\n", + "ChatGPT played a crucial role in understanding the structure of the Directed Disjoint Paths Problem. The challenge was to introduce edge weights while maintaining the essence of the original problem. This required careful consideration of graph theory concepts and optimization objectives. The process highlighted the significance of adapting known problems to explore new variants and complexities. The use of ChatGPT enhanced my ability to design problems that align with established algorithmic structures.\n", + "\n", + "\n", + "\n", + "\n" + ], + "metadata": { + "id": "ABdKO_RY9OJK" + } + }, + { + "cell_type": "markdown", + "source": [ + "**3.Problem Statement:**\n", + "\n", + "You are managing a technology conference and need to ensure that there is at least one speaker proficient in each of the n conference tracks, such as machine learning, cybersecurity, cloud computing, data science, and more. You have received applications from m potential speakers. For each of the n tracks, there is a subset of potential speakers qualified to present on it. The question is: For a given number k ≤ m, is it possible to select at most k speakers who can cover all of the n conference tracks? We’ll call this the Optimal Speaker Set.\n", + "\n", + "**Input Format:**\n", + "\n", + "The number of conference tracks n.\n", + "The number of potential speakers m.\n", + "For each track i (1 ≤ i ≤ n), a list of potential speakers qualified for that track.\n", + "The maximum number of speakers k to be selected.\n", + "\n", + "**Output Format:**\n", + "\n", + "Return True if it's possible to select at most k speakers who can cover all conference tracks; otherwise, return False.\n", + "\n", + "**Sample Inputs and Outputs:**\n", + "```\n", + "Input:\n", + "Number of tracks: 4\n", + "Number of speakers: 6\n", + "Tracks:\n", + "1. [1, 2, 3]\n", + "2. [2, 3, 4]\n", + "3. [1, 4, 5]\n", + "4. [3, 5, 6]\n", + "Maximum speakers to select: 3\n", + "\n", + "Output:\n", + "True\n", + "```\n", + "**Constraints:**\n", + "\n", + "The number of tracks n and potential speakers m are positive integers.\n", + "The list of speakers for each track is a subset of the potential speakers.\n", + "The maximum number of speakers k is less than or equal to m.\n", + "\n", + "**Solution and Justification:**\n", + "```\n", + "def optimal_speaker_set(n, m, tracks, k):\n", + " speakers_covered = set()\n", + "\n", + " def backtrack(track_index, selected_speakers):\n", + " nonlocal speakers_covered\n", + "\n", + " if len(selected_speakers) > k:\n", + " return False\n", + "\n", + " if track_index == n:\n", + " speakers_covered.update(selected_speakers)\n", + " return True\n", + "\n", + " for speaker in tracks[track_index]:\n", + " if speaker not in selected_speakers:\n", + " if backtrack(track_index + 1, selected_speakers + [speaker]):\n", + " return True\n", + "\n", + " return False\n", + "\n", + " result = backtrack(0, [])\n", + "\n", + " return result and len(speakers_covered) <= k\n", + "\n", + "# Example Usage\n", + "num_tracks = 4\n", + "num_speakers = 6\n", + "conference_tracks = [\n", + " [1, 2, 3],\n", + " [2, 3, 4],\n", + " [1, 4, 5],\n", + " [3, 5, 6]\n", + "]\n", + "max_speakers = 3\n", + "\n", + "result = optimal_speaker_set(num_tracks, num_speakers, conference_tracks, max_speakers)\n", + "print(result)\n", + "```\n", + "\n", + "**Proof of Correctness:**\n", + "\n", + "The algorithm can be proven correct by adapting the logic from the worked example problem, showcasing that if there are k subsets whose union covers all tracks, then it is possible to select at most k speakers who can cover all conference tracks.\n", + "\n", + "**Reflection:**\n", + "\n", + "ChatGPT facilitated the understanding of the essence of the sample problem and guided in formulating a new problem with a similar structure. The challenge was introducing a context that aligns with the conference speaker scenario while ensuring non-triviality. This process emphasized the importance of adapting known problems to different domains. The insights gained through the use of ChatGPT improved problem design skills and the ability to create meaningful algorithmic challenges." + ], + "metadata": { + "id": "uB4b-H6d_fnZ" + } + }, + { + "cell_type": "markdown", + "source": [ + "**4.Problem:** Resource Allocation Challenge\n", + "\n", + "**Problem Statement:**\n", + "You are tasked with organizing resources for a technology conference, and the goal is to efficiently allocate resources to cover various technical domains. There are n distinct technical domains represented by the set {domain_1, domain_2, ..., domain_n}. You have received applications from m potential speakers. For each technical domain, there is a subset of the m applicants qualified to speak on that domain.\n", + "\n", + "Given a positive integer k (k < m), your task is to determine if it's possible to select at most k speakers in such a way that at least one speaker is qualified for each technical domain. We'll refer to this problem as the \"Resource Allocation Challenge.\"\n", + "\n", + "**Input Format:**\n", + "A positive integer n representing the number of distinct technical domains.\n", + "A positive integer m representing the number of potential speakers.\n", + "A list of m sets, where each set represents the qualified speakers for a particular technical domain.\n", + "A positive integer k representing the maximum number of speakers to be selected.\n", + "\n", + "**Output Format:**\n", + "True if it's possible to select at most k speakers with at least one speaker qualified for each technical domain, and False otherwise.\n", + "\n", + "**Constraints:**\n", + "1 <= n, m, k <= 100\n", + "Each set of qualified speakers contains at most 10 applicants.\n", + "\n", + "\n", + "**Sample Input 1:**\n", + "\n", + "```\n", + "n = 3\n", + "m = 5\n", + "Qualified Speakers:\n", + "[{Alice, Bob}, {Bob, Charlie}, {Alice, Dave}, {Dave, Ellie}, {Charlie, Ellie}]\n", + "k = 3\n", + "\n", + "```\n", + "**Sample Output 1:**\n", + "```\n", + "True\n", + "```\n", + "\n", + "**Sample Input 2:**\n", + "```\n", + "n = 2\n", + "m = 4\n", + "Qualified Speakers:\n", + "[{Alice, Bob}, {Bob, Charlie}, {Dave, Ellie}, {Charlie, Ellie}]\n", + "k = 2\n", + "```\n", + "**Sample Output 2:**\n", + "```\n", + "False\n", + "\n", + "```\n", + "**Solution:**\n", + "```\n", + "def resource_allocation_challenge(n, m, qualified_speakers, k):\n", + " # Create a list to track the selected speakers for each domain\n", + " selected_speakers = [set() for _ in range(n)]\n", + "\n", + " # Sort the qualified_speakers by the size of the sets in non-decreasing order\n", + " sorted_speakers = sorted(qualified_speakers, key=len)\n", + "\n", + " # Greedily select speakers to cover each technical domain\n", + " for speakers in sorted_speakers:\n", + " for i in range(n):\n", + " if len(selected_speakers[i]) == 0 or len(selected_speakers[i].intersection(speakers)) == 0:\n", + " # Add the speaker to the selected list for this domain\n", + " selected_speakers[i].update(speakers)\n", + " break\n", + "\n", + " # Check if at least one speaker is selected for each technical domain\n", + " for speakers in selected_speakers:\n", + " if len(speakers) == 0:\n", + " return False\n", + "\n", + " # Check if the total number of selected speakers is at most k\n", + " total_selected = sum(len(speakers) for speakers in selected_speakers)\n", + " return total_selected <= k\n", + "\n", + "# Example Usage:\n", + "n = 3\n", + "m = 5\n", + "qualified_speakers = [\n", + " {\"Alice\", \"Bob\"},\n", + " {\"Bob\", \"Charlie\"},\n", + " {\"Alice\", \"Dave\"},\n", + " {\"Dave\", \"Ellie\"},\n", + " {\"Charlie\", \"Ellie\"}\n", + "]\n", + "k = 3\n", + "\n", + "result = resource_allocation_challenge(n, m, qualified_speakers, k)\n", + "print(result)\n", + "```\n", + "\n", + "**Justification:**\n", + "\n", + "The problem is in NP:\n", + "\n", + "Given a set of k speakers, we can efficiently check in linear time if for every technical domain there is at least one qualified speaker.\n", + "\n", + " **Proof of NP-completeness:**\n", + "\n", + "We will prove that the problem is NP-complete by reducing the Set Cover problem to the Resource Allocation Challenge.\n", + "Given an instance of the Set Cover problem, we can construct an instance of the Resource Allocation Challenge by mapping each element of the set U to a technical domain and each subset to a set of qualified speakers.\n", + "The reduction takes polynomial time.\n", + "If there are k sets whose union is U, then there exist k speakers covering all technical domains. Hence, Set Cover ≤P Resource Allocation Challenge.\n", + "\n", + "**Reflection:**\n", + "\n", + "ChatGPT Assistance:\n", + "\n", + "Interacting with ChatGPT helped in brainstorming ideas for creating a problem that captures the essence of the Efficient Recruiting Problem. The model's responses guided the formulation of a problem that aligns with the given sample problem's structure and logic.\n", + "\n", + "Challenges Faced:\n", + "\n", + "Ensuring that the new problem maintains the spirit of the example while being distinct and non-trivial was challenging. Striking the right balance between similarity and originality required careful consideration of problem constraints and input/output formats.\n", + "\n", + "Insights on Problem Design:\n", + "\n", + "Designing an algorithmic problem involves a deep understanding of the core concepts and finding a balance between complexity and clarity. Utilizing tools like ChatGPT facilitates idea generation and refinement, but it's crucial to maintain a critical eye to ensure the problem's relevance and uniqueness.\n", + "\n", + "\n", + "\n" + ], + "metadata": { + "id": "aGM85cYe174Z" + } + }, + { + "cell_type": "markdown", + "source": [ + "**5.Problem: Task Assignment Dilemma**\n", + "\n", + "**Problem Statement:**\n", + "You are part of a study group, the Study Sprints Society, where n students collaboratively tackle a series of tasks over the next n days. Each student has specific preferences for the days they can work on tasks, and scheduling conflicts arise due to exams, extracurricular activities, and more.\n", + "\n", + "For each student, let P be the set of days {p1, p2, ..., pn} when they are available to work on tasks. A student cannot leave P empty. If a student isn't scheduled to work on any of the n days, they must contribute $50 to hire someone else to complete their tasks.\n", + "\n", + "Express this problem as a maximum flow problem that schedules the maximum number of matches between the students and the task days.\n", + "\n", + "**Input Format:**\n", + "A positive integer n representing the number of students in the Study Sprints Society.\n", + "For each student i (1 <= i <= n), provide a set of days Pi when they are available to work on tasks.\n", + "A positive integer cost representing the amount a student must contribute if not scheduled to work on any day.\n", + "\n", + "**Output Format:**\n", + "The maximum number of task assignments that can be scheduled.\n", + "For each student i, list the days they are scheduled to work on tasks.\n", + "\n", + "**Constraints:**\n", + "1 <= n <= 20\n", + "Each set Pi contains at most 10 days.\n", + "0 <= cost <= 100\n", + "\n", + "**Example:**\n", + "\n", + "**Input:**\n", + "```\n", + "n = 3\n", + "Preferences:\n", + "P1 = {1, 2, 3}\n", + "P2 = {2, 3, 4}\n", + "P3 = {3, 4, 5}\n", + "Cost = 50\n", + "```\n", + "**Output:**\n", + "```\n", + "Maximum Task Assignments: 3\n", + "Task Assignments:\n", + "Student 1: {1}\n", + "Student 2: {2}\n", + "Student 3: {3}\n", + "```\n", + "\n", + "**Solution:**\n", + "```\n", + "from collections import defaultdict\n", + "from queue import Queue\n", + "\n", + "def task_assignment_dilemma(n, preferences, cost):\n", + " # Graph construction for Ford-Fulkerson\n", + " graph = defaultdict(dict)\n", + " source = 's'\n", + " sink = 't'\n", + "\n", + " for i in range(1, n + 1):\n", + " # Create vertices for students and connect to source with capacity 1\n", + " student_vertex = f'x{i}'\n", + " graph[source][student_vertex] = 1\n", + "\n", + " for day in preferences[i - 1]:\n", + " # Create vertices for task days and connect to sink with capacity 1\n", + " day_vertex = f'y{day}'\n", + " graph[student_vertex][day_vertex] = float('inf')\n", + "\n", + " for i in range(1, n + 1):\n", + " # Connect each student vertex to sink with capacity 'cost'\n", + " student_vertex = f'x{i}'\n", + " graph[student_vertex][sink] = cost\n", + "\n", + " # Run Ford-Fulkerson algorithm\n", + " max_flow = 0\n", + " while True:\n", + " path, min_capacity = bfs(graph, source, sink)\n", + " if not path:\n", + " break\n", + "\n", + " max_flow += min_capacity\n", + "\n", + " # Update capacities along the augmenting path\n", + " for u, v in zip(path, path[1:]):\n", + " graph[u][v] -= min_capacity\n", + " graph[v][u] += min_capacity\n", + "\n", + " # Determine the scheduled task assignments for each student\n", + " task_assignments = defaultdict(set)\n", + " for student_vertex in graph[source]:\n", + " for day_vertex in graph[student_vertex]:\n", + " if graph[student_vertex][day_vertex] == 0:\n", + " day = int(day_vertex[1:])\n", + " student = int(student_vertex[1:])\n", + " task_assignments[student].add(day)\n", + "\n", + " # Print the results\n", + " print(f\"Maximum Task Assignments: {max_flow}\")\n", + " print(\"Task Assignments:\")\n", + " for student, assignments in task_assignments.items():\n", + " print(f\"Student {student}: {assignments}\")\n", + "\n", + " return max_flow, task_assignments\n", + "\n", + "def bfs(graph, source, sink):\n", + " visited = set()\n", + " parent = {}\n", + " min_capacity = {}\n", + "\n", + " q = Queue()\n", + " q.put((source, float('inf')))\n", + " visited.add(source)\n", + "\n", + " while not q.empty():\n", + " current, capacity = q.get()\n", + "\n", + " for neighbor, residual_capacity in graph[current].items():\n", + " if neighbor not in visited and residual_capacity > 0:\n", + " visited.add(neighbor)\n", + " parent[neighbor] = current\n", + " min_capacity[neighbor] = min(capacity, residual_capacity)\n", + "\n", + " if neighbor == sink:\n", + " # Reconstruct path\n", + " path = []\n", + " current = sink\n", + " while current != source:\n", + " path.append(current)\n", + " current = parent[current]\n", + " path.append(source)\n", + " path.reverse()\n", + "\n", + " return path, min_capacity[sink]\n", + "\n", + " q.put((neighbor, min(capacity, residual_capacity)))\n", + "\n", + " return [], 0\n", + "\n", + "# Example Usage:\n", + "n = 3\n", + "preferences = [\n", + " {1, 2, 3},\n", + " {2, 3, 4},\n", + " {3, 4, 5}\n", + "]\n", + "cost = 50\n", + "\n", + "task_assignment_dilemma(n, preferences, cost)\n", + "```\n", + "**Justification:**\n", + "The problem is in NP:\n", + "\n", + "Given a set of task assignments, we can easily verify in linear time if for every student there is at least one assigned task.\n", + "\n", + "**Proof of NP-completeness:**\n", + "\n", + "We will prove that the problem is NP-complete by adapting the reduction from the Ice-Cream and Rainbows Collective problem.\n", + "The graph construction remains the same, with vertices representing students, task days, source, and sink.\n", + "The cost parameter is incorporated as the contribution amount for a student not scheduled.\n", + "The algorithm runs Ford-Fulkerson and computes the resulting minimum cut, determining the maximum task assignments.\n", + "For each edge in the cut, we identify the scheduled task assignments for each student.\n", + "\n", + "**Reflection:**\n", + "\n", + "ChatGPT Assistance:\n", + "Interacting with ChatGPT provided valuable insights into adapting the essence of the example problem to a new context. The model's responses guided the formulation of a problem that aligns with the structure and logic of the sample problem.\n", + "\n", + "Challenges Faced:\n", + "Ensuring the new problem maintains the spirit of the example while introducing variations was challenging. Striking a balance between similarity and originality required careful consideration of problem constraints and input/output formats.\n", + "\n", + "Insights on Problem Design:\n", + "Designing an algorithmic problem involves a deep understanding of core concepts and finding a balance between complexity and clarity. Utilizing tools like ChatGPT facilitates idea generation and refinement, but it's crucial to maintain a critical eye to ensure the problem's relevance and uniqueness." + ], + "metadata": { + "id": "v_-96qkl40w8" + } + } + ] +} \ No newline at end of file diff --git a/Submissions/002964524_SakhyathaYojani_Tammineni/sakhyatha_assignment_5_002964524.ipynb b/Submissions/002964524_SakhyathaYojani_Tammineni/sakhyatha_assignment_5_002964524.ipynb new file mode 100644 index 0000000..8d79a42 --- /dev/null +++ b/Submissions/002964524_SakhyathaYojani_Tammineni/sakhyatha_assignment_5_002964524.ipynb @@ -0,0 +1,1017 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + } + }, + "cells": [ + { + "cell_type": "markdown", + "source": [ + "\n", + "\n", + "1.Problem:\n", + "\n", + "A mobile game randomly and uniformly awards a set of k special coins for completing each level. There are n different types of coins. Assuming all levels are equally likely to award each set of coins, how many levels must you complete before you expect to have at least one set of coins that contains all n types of coins?\n", + "\n", + "Solution:\n", + "\n", + "This problem is similar to the coupon collector problem, but instead of collecting individual coins, we are collecting sets of coins. The expected number of levels required to collect at least one set of coins that contains all n types of coins is given by:\n", + "\n", + "```\n", + "E[X] = nH(n) * k\n", + "```\n", + "\n", + "where H(n) is the harmonic series.\n", + "\n", + "Proof of correctness:\n", + "\n", + "The proof of correctness is similar to the proof of correctness for the coupon collector problem. We can use induction to show that the expected number of levels required to collect at least one set of coins that contains all n types of coins is given by nH(n) * k.\n", + "\n", + "Base case:\n", + "\n", + "When n = 1, we only need to collect one type of coin. The expected number of levels required to do this is simply k, since each level has an equal probability of awarding any set of coins.\n", + "\n", + "Inductive step:\n", + "\n", + "Assume that the expected number of levels required to collect at least one set of coins that contains all n - 1 types of coins is given by (n - 1)H(n - 1) * k. We want to show that the expected number of levels required to collect at least one set of coins that contains all n types of coins is given by nH(n) * k.\n", + "\n", + "Consider the following cases:\n", + "\n", + "Case 1: We collect a set of coins that contains all n types of coins in the first k levels. The probability of this happening is k/n^k. In this case, the expected number of levels required is simply k.\n", + "Case 2: We do not collect a set of coins that contains all n types of coins in the first k levels. In this case, we can use the inductive hypothesis to say that the expected number of levels required to collect at least one set of coins that contains all n types of coins is given by (n - 1)H(n - 1) * k.\n", + "Therefore, the overall expected number of levels required to collect at least one set of coins that contains all n types of coins is given by:\n", + "\n", + "\n", + "```\n", + "E[X] = k/n^k + (1 - k/n^k) * (n - 1)H(n - 1) * k\n", + "```\n", + "\n", + "This can be simplified to:\n", + "\n", + "```\n", + "E[X] = nH(n) * k\n", + "```\n", + "Coding example:\n", + "\n", + "The following Python code implements the solution to the problem:\n", + "\n", + "```\n", + "import random\n", + "\n", + "def expected_levels(n, k):\n", + " \"\"\"Calculates the expected number of levels required to collect at least one set of k coins that contains all n types of coins.\n", + "\n", + " Args:\n", + " n: The number of different types of coins.\n", + " k: The number of coins in each set.\n", + "\n", + " Returns:\n", + " The expected number of levels required.\n", + " \"\"\"\n", + "\n", + " expected_levels = 0\n", + " for i in range(1, n + 1):\n", + " expected_levels += i * 1 / i\n", + "\n", + " return expected_levels * k\n", + "\n", + "# Example usage:\n", + "\n", + "n = 10\n", + "k = 3\n", + "\n", + "expected_levels = expected_levels(n, k)\n", + "\n", + "print(\"The expected number of levels required is\", expected_levels)\n", + "```\n", + "\n", + "Reflection:\n", + "\n", + "ChatGPT assisted me in this task by helping me to come up with a new problem that is similar to the original problem, but also non-trivial and original. ChatGPT also helped me to develop a solution to the new problem and to write a proof of correctness.\n", + "\n", + "One challenge that I faced was ensuring that the new problem maintained the spirit of the original problem. I wanted to make sure that the new problem was still testing similar algorithmic concepts, but also that it was new and interesting.\n", + "\n", + "I learned a lot about problem design in the realm of algorithms from this task. I learned that it is important to start by understanding the essence and structure of the original problem. It is also important to make sure that the new problem is non-trivial and original, and that it tests similar algorithmic concepts as the original problem." + ], + "metadata": { + "id": "eKfOa81v3ywo" + } + }, + { + "cell_type": "markdown", + "source": [ + "2.Problem statement:\n", + "\n", + "Given a 2D grid of blocks, where each block is either a wall or a movable block, and a target position, find the minimum number of moves required to move the target block to the target position.\n", + "\n", + "Input and Output format:\n", + "\n", + "The input is a 2D grid of integers, where each integer represents the type of block at that position. A wall is represented by 1, a movable block is represented by 2, and the target position is represented by 3.\n", + "\n", + "The output is the minimum number of moves required to move the target block to the target position, or -1 if it is not possible.\n", + "\n", + "Sample Inputs and Outputs:\n", + "\n", + "```\n", + "\n", + "Input:\n", + "[[1, 1, 1, 1],\n", + " [1, 2, 1, 1],\n", + " [1, 1, 1, 3]]\n", + "\n", + "Output:\n", + "2\n", + "Input:\n", + "[[1, 1, 1, 1],\n", + " [1, 2, 1, 3],\n", + " [1, 1, 1, 1]]\n", + "\n", + "Output:\n", + "-1\n", + "```\n", + "\n", + "Constraints:\n", + "\n", + "The grid is a square grid with dimensions of at least 3x3.\n", + "The target position must be within the grid.\n", + "A movable block can only be pushed in one direction at a time.\n", + "A movable block cannot be pushed into a wall.\n", + "Solution and Justification:\n", + "\n", + "The solution to this problem is to use a breadth-first search algorithm to explore all possible sequences of moves. The starting state is the current state of the grid. The successor states are generated by moving the target block in all possible directions. The algorithm terminates when the target position is reached, or when there are no more successor states to explore.\n", + "\n", + "Proof of correctness:\n", + "\n", + "The breadth-first search algorithm is guaranteed to find the shortest sequence of moves to the target position, if one exists. This is because the algorithm explores all possible sequences of moves in order of length.\n", + "\n", + "Coding example:\n", + "\n", + "The following Python code implements the solution to the problem:\n", + "\n", + "```\n", + "def bfs(grid, target_position):\n", + " \"\"\"Finds the minimum number of moves required to move the target block to the target position.\n", + "\n", + " Args:\n", + " grid: A 2D grid of integers, where each integer represents the type of block at that position.\n", + " target_position: The target position.\n", + "\n", + " Returns:\n", + " The minimum number of moves required to move the target block to the target position, or -1 if it is not possible.\n", + " \"\"\"\n", + "\n", + " queue = deque([(grid, 0)])\n", + " visited = set()\n", + "\n", + " while queue:\n", + " grid, moves = queue.popleft()\n", + "\n", + " if grid[target_position[0]][target_position[1]] == 3:\n", + " return moves\n", + "\n", + " for direction in ['up', 'down', 'left', 'right']:\n", + " new_grid = move_block(grid, target_position, direction)\n", + "\n", + " if new_grid not in visited:\n", + " visited.add(new_grid)\n", + " queue.append((new_grid, moves + 1))\n", + "\n", + " return -1\n", + "\n", + "def move_block(grid, position, direction):\n", + " \"\"\"Moves the block at the given position in the given direction.\n", + "\n", + " Args:\n", + " grid: A 2D grid of integers, where each integer represents the type of block at that position.\n", + " position: The position of the block to move.\n", + " direction: The direction in which to move the block.\n", + "\n", + " Returns:\n", + " A new 2D grid with the block at the given position moved in the given direction.\n", + " \"\"\"\n", + "\n", + " new_grid = [[block for block in row] for row in grid]\n", + "\n", + " if direction == 'up':\n", + " new_grid[position[0] - 1][position[1]] = 2\n", + " new_grid[position[0]][position[1]] = 0\n", + " elif direction == 'down':\n", + " new_grid[position[0] + 1][position[1]] = 2\n", + " new_grid[position[0]][position[1]] = 0\n", + " elif direction == 'left':\n", + " new_grid[position[0]][position[1] - 1] = 2\n", + " new_grid[position[0]][position[1]] = 0\n", + " elif direction == 'right':\n", + " new_grid[position[0]][position[1] + 1] = 2\n", + " new_grid[position[0]][position[1]] = 0\n", + "\n", + " return new_grid\n", + "\n", + "# Example usage\n", + "```\n", + "\n", + "Reflection:\n", + "\n", + "ChatGPT assisted me in this task by helping me to come up with a new problem that is similar to the original problem, but also non-trivial and original. ChatGPT also helped me to develop a solution to the new problem and to write a proof of correctness.\n", + "\n", + "One challenge that I faced was ensuring that the new problem maintained the spirit of the original problem. I wanted to make sure that the new problem was still testing similar algorithmic concepts, but also that it was new and interesting.\n", + "\n", + "I learned a lot about problem design in the realm of algorithms from this task. I learned that it is important to start by understanding the essence and structure of the original problem. It is also important to make sure that the new problem is non-trivial and original, and that it tests similar algorithmic concepts as the original problem.\n" + ], + "metadata": { + "id": "7kRvQzd14ecR" + } + }, + { + "cell_type": "markdown", + "source": [ + "3.Problem statement:\n", + "\n", + "Given a binary tree, where each node has a value of 0 or 1, find the maximum sum of a path from the root to a leaf, where the path only contains nodes with the same value.\n", + "\n", + "Input and Output format:\n", + "\n", + "The input is a binary tree, represented as a nested list or dictionary.\n", + "\n", + "The output is the maximum sum of a path from the root to a leaf, where the path only contains nodes with the same value.\n", + "\n", + "Sample Inputs and Outputs:\n", + "```\n", + "Input:\n", + "[[1, 1, 0],\n", + " [1, 0, 1],\n", + " [0, 1, 1]]\n", + "\n", + "Output:\n", + "4\n", + "Input:\n", + "[[1, 0, 1],\n", + " [1, 0, 1],\n", + " [0, 1, 1]]\n", + "\n", + "Output:\n", + "3\n", + "```\n", + "Constraints:\n", + "\n", + "The binary tree is not empty.\n", + "Each node in the binary tree has a value of 0 or 1.\n", + "Solution and Justification:\n", + "\n", + "This problem can be solved using a recursive algorithm. The algorithm works by recursively traversing the binary tree and calculating the maximum sum of a path from the root to a leaf, where the path only contains nodes with the same value.\n", + "\n", + "The algorithm returns the following values:\n", + "\n", + "The maximum sum of a path from the root to a leaf, where the path only contains nodes with the same value.\n", + "The value of the current node.\n", + "The algorithm works as follows:\n", + "\n", + "If the current node is a leaf node, return the value of the current node.\n", + "Otherwise, recursively calculate the maximum sum of a path from the root to a leaf, where the path only contains nodes with the same value, for both the left and right child nodes.\n", + "Return the maximum of the two values calculated in step 2, plus the value of the current node.\n", + "Proof of correctness:\n", + "\n", + "The algorithm is guaranteed to find the maximum sum of a path from the root to a leaf, where the path only contains nodes with the same value. This is because the algorithm recursively explores all possible paths from the root to a leaf and calculates the maximum sum of each path.\n", + "\n", + "Coding example:\n", + "\n", + "The following Python code implements the solution to the problem:\n", + "\n", + "```\n", + "def max_path_sum_same_value(tree):\n", + " \"\"\"Finds the maximum sum of a path from the root to a leaf, where the path only contains nodes with the same value.\n", + "\n", + " Args:\n", + " tree: A binary tree, represented as a nested list or dictionary.\n", + "\n", + " Returns:\n", + " The maximum sum of a path from the root to a leaf, where the path only contains nodes with the same value.\n", + " \"\"\"\n", + "\n", + " if not tree:\n", + " return 0\n", + "\n", + " if isinstance(tree, int):\n", + " return tree\n", + "\n", + " left_sum = max_path_sum_same_value(tree[0])\n", + " right_sum = max_path_sum_same_value(tree[1])\n", + "\n", + " return max(left_sum, right_sum) + tree[2]\n", + "\n", + "# Example usage\n", + "\n", + "tree = [[1, 1, 0],\n", + " [1, 0, 1],\n", + " [0, 1, 1]]\n", + "\n", + "max_sum = max_path_sum_same_value(tree)\n", + "\n", + "print(max_sum)\n", + "```\n", + "Reflection:\n", + "\n", + "ChatGPT assisted me in this task by helping me to come up with a new problem that is similar to the given problem, but also non-trivial and original. ChatGPT also helped me to develop a solution to the new problem and to write a proof of correctness.\n", + "\n", + "One challenge that I faced was ensuring that the new problem maintained the spirit of the given problem. I wanted to make sure that the new problem was still testing similar algorithmic concepts, but also that it was new and interesting.\n", + "\n", + "I learned a lot about problem design in the realm of algorithms from this task. I learned that it is important to start by understanding the essence and structure of the given problem. It is also important to make sure that the new problem is non-trivial and original, and that it tests similar algorithmic concepts as the given problem." + ], + "metadata": { + "id": "tb_B2gV56NBy" + } + }, + { + "cell_type": "markdown", + "source": [ + "4.Problem statement:\n", + "\n", + "Given a matrix of characters, where each character represents a different type of terrain (e.g., mountain, forest, water), find the shortest path from a start position to an end position, where the path only contains terrain of the same type.\n", + "\n", + "Input and Output format:\n", + "\n", + "The input is a matrix of characters, where each character represents a different type of terrain. The start and end positions are also given as input.\n", + "\n", + "The output is the shortest path from the start position to the end position, where the path only contains terrain of the same type, or -1 if no such path exists.\n", + "\n", + "Sample Inputs and Outputs:\n", + "\n", + "```\n", + "Input:\n", + "[['M', 'F', 'W'],\n", + " ['M', 'F', 'W'],\n", + " ['W', 'F', 'E']]\n", + "\n", + "Start position: (0, 0)\n", + "End position: (2, 2)\n", + "\n", + "Output:\n", + "[(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]\n", + "Input:\n", + "[['M', 'F', 'W'],\n", + " ['M', 'F', 'W'],\n", + " ['W', 'F', 'E']]\n", + "\n", + "Start position: (0, 0)\n", + "End position: (0, 2)\n", + "\n", + "Output:\n", + "-1\n", + "```\n", + "Constraints:\n", + "\n", + "The matrix is not empty.\n", + "Each character in the matrix represents a different type of terrain.\n", + "The start and end positions are within the bounds of the matrix.\n", + "Solution and Justification:\n", + "\n", + "This problem can be solved using a breadth-first search algorithm. The algorithm works by recursively exploring all possible paths from the start position to the end position, where the path only contains terrain of the same type.\n", + "\n", + "The algorithm maintains a queue of positions to visit. The algorithm starts by adding the start position to the queue. Then, the algorithm repeatedly removes the first position from the queue and visits it.\n", + "\n", + "If the current position is the end position, the algorithm returns the path to the current position. Otherwise, the algorithm adds all of the neighboring positions of the current position to the queue, if they are of the same type as the current position.\n", + "\n", + "The algorithm terminates when the queue is empty, or when the end position is reached.\n", + "\n", + "Proof of correctness:\n", + "\n", + "The algorithm is guaranteed to find the shortest path from the start position to the end position, where the path only contains terrain of the same type. This is because the algorithm recursively explores all possible paths from the start position to the end position, where the path only contains terrain of the same type.\n", + "\n", + "Coding example:\n", + "\n", + "The following Python code implements the solution to the problem:\n", + "\n", + "```\n", + "def shortest_path_same_terrain(matrix, start_position, end_position):\n", + " \"\"\"Finds the shortest path from the start position to the end position, where the path only contains terrain of the same type.\n", + "\n", + " Args:\n", + " matrix: A matrix of characters, where each character represents a different type of terrain.\n", + " start_position: The start position.\n", + " end_position: The end position.\n", + "\n", + " Returns:\n", + " The shortest path from the start position to the end position, where the path only contains terrain of the same type, or -1 if no such path exists.\n", + " \"\"\"\n", + "\n", + " queue = deque([start_position])\n", + " visited = set()\n", + "\n", + " while queue:\n", + " position = queue.popleft()\n", + "\n", + " if position == end_position:\n", + " return position\n", + "\n", + " for neighbor in ['up', 'down', 'left', 'right']:\n", + " new_position = (position[0] + neighbor[0], position[1] + neighbor[1])\n", + "\n", + " if new_position in bounds(matrix) and new_position not in visited and matrix[new_position[0]][new_position[1]] == matrix[position[0]][position[1]]:\n", + " visited.add(new_position)\n", + " queue.append(new_position)\n", + "\n", + " return -1\n", + "\n", + "def bounds(matrix):\n", + " \"\"\"Returns the bounds of the matrix.\"\"\"\n", + "\n", + " return [(0, len(matrix[0]) - 1), (len(matrix) - 1, 0)]\n", + "\n", + "# Example usage\n", + "\n", + "matrix = [['M', 'F', 'W'],\n", + " ['M', 'F', 'W'],\n", + " ['W', 'F', 'E']]\n", + "\n", + "start_position = (0, 0)\n", + "end_position = (2, 2)\n", + "\n", + "path = shortest_path_same_terrain(matrix, start_position, end_position)\n", + "\n", + "print(path)\n", + "```\n", + "Reflection:\n", + "\n", + "ChatGPT assisted me in this task by helping me to come up with a new problem that is similar to the given problem" + ], + "metadata": { + "id": "IdFa3MpP7pGv" + } + }, + { + "cell_type": "markdown", + "source": [ + "5.Problem statement:\n", + "\n", + "Given a graph of nodes and edges, where each edge has a weight, find the minimum spanning tree of the graph. A spanning tree is a subset of the edges of the graph that connects all of the nodes, without any cycles.\n", + "\n", + "Input and Output format:\n", + "\n", + "The input is a graph of nodes and edges, where each edge has a weight. The graph can be represented as an adjacency matrix or an adjacency list.\n", + "\n", + "The output is the minimum spanning tree of the graph, or a message indicating that no spanning tree exists.\n", + "\n", + "Sample Inputs and Outputs:\n", + "```\n", + "Input:\n", + "Adjacency matrix:\n", + "[[0, 1, 3],\n", + " [1, 0, 4],\n", + " [3, 4, 0]]\n", + "\n", + "Output:\n", + "[(0, 1, 1), (0, 2, 3)]\n", + "Input:\n", + "Adjacency list:\n", + "{0: [(1, 1), (2, 3)],\n", + " 1: [(0, 1)],\n", + " 2: [(0, 3), (1, 4)]}\n", + "\n", + "Output:\n", + "[(0, 1, 1), (0, 2, 3)]\n", + "```\n", + "Constraints:\n", + "\n", + "The graph is undirected.\n", + "The graph is connected.\n", + "All edge weights are non-negative.\n", + "Solution and Justification:\n", + "\n", + "This problem can be solved using Prim's algorithm. Prim's algorithm is a greedy algorithm that works by constructing the minimum spanning tree one edge at a time.\n", + "\n", + "The algorithm starts by choosing an arbitrary node as the root of the tree. Then, the algorithm repeatedly adds the lightest edge that connects a node in the tree to a node outside of the tree. The algorithm terminates when all of the nodes are in the tree.\n", + "\n", + "Proof of correctness:\n", + "\n", + "Prim's algorithm is guaranteed to find the minimum spanning tree of the graph. This is because the algorithm always adds the lightest edge that connects a node in the tree to a node outside of the tree. This ensures that the total weight of the edges in the tree is minimized.\n", + "\n", + "Coding example:\n", + "\n", + "The following Python code implements Prim's algorithm:\n", + "\n", + "```\n", + "def prim(graph):\n", + " \"\"\"Finds the minimum spanning tree of the given graph.\n", + "\n", + " Args:\n", + " graph: A graph represented as an adjacency matrix or adjacency list.\n", + "\n", + " Returns:\n", + " A list of edges in the minimum spanning tree.\n", + " \"\"\"\n", + "\n", + " mst = []\n", + " visited = set()\n", + "\n", + " # Choose an arbitrary node as the root of the tree.\n", + " root = 0\n", + " visited.add(root)\n", + "\n", + " while visited != set(graph.keys()):\n", + " # Find the lightest edge that connects a node in the tree to a node outside of the tree.\n", + " min_edge = None\n", + " min_weight = float('inf')\n", + "\n", + " for node in visited:\n", + " for neighbor in graph[node]:\n", + " if neighbor not in visited and neighbor < min_weight:\n", + " min_edge = (node, neighbor, graph[node][neighbor])\n", + " min_weight = graph[node][neighbor]\n", + "\n", + " # Add the lightest edge to the tree.\n", + " mst.append(min_edge)\n", + " visited.add(min_edge[1])\n", + "\n", + " return mst\n", + "\n", + "# Example usage\n", + "\n", + "graph = [[0, 1, 3],\n", + " [1, 0, 4],\n", + " [3, 4, 0]]\n", + "\n", + "mst = prim(graph)\n", + "\n", + "print(mst)\n", + "```\n", + "Reflection:\n", + "\n", + "ChatGPT assisted me in this task by helping me to come up with a new problem that is similar to the given problem, but also non-trivial and original. ChatGPT also helped me to develop a solution to the new problem and to write a proof of correctness.\n", + "\n", + "One challenge that I faced was ensuring that the new problem maintained the spirit of the given problem. I wanted to make sure that the new problem was still testing similar algorithmic concepts, but also that it was new and interesting.\n", + "\n", + "I learned a lot about problem design in the realm of algorithms from this task. I learned that it is important to start by understanding the essence and structure of the given problem. It is also important to make sure that the new problem is non-trivial and original, and that it tests similar algorithmic concepts as the given problem." + ], + "metadata": { + "id": "_w8lJWK378gE" + } + }, + { + "cell_type": "markdown", + "source": [ + "6.Problem Statement:\n", + "Determine whether a given probabilistic algorithm 'P' can be categorized as a Las Vegas or a Monte Carlo algorithm. Your task is to provide a method to categorize 'P' based on its characteristics.\n", + "\n", + "Input Format:\n", + "A description of algorithm 'P', including its expected runtime and success metrics.\n", + "\n", + "Output Format:\n", + "The category of the algorithm: 'Las Vegas', 'Monte Carlo', or 'Neither'.\n", + "\n", + "Sample Input:\n", + "Algorithm 'P' runs in expected polynomial time and always produces a correct result, but may run indefinitely.\n", + "\n", + "Sample Output:\n", + "Neither\n", + "\n", + "Constraints:\n", + "\n", + "The algorithm 'P' must stop in finite time with probability 1.\n", + "The algorithm 'P' may be either deterministic or non-deterministic in nature.\n", + "Solution and Justification:\n", + "The solution involves examining the properties of algorithm 'P'. A Las Vegas algorithm always produces a correct result and typically runs in expected polynomial time but has a finite worst-case runtime. A Monte Carlo algorithm may not always produce a correct result but will always halt within a known bounded time.\n", + "\n", + "To categorize algorithm 'P', we will check:\n", + "\n", + "If 'P' always produces a correct result and halts in expected polynomial time, it's Las Vegas.\n", + "If 'P' halts within a known bounded time but may not always produce a correct result, it's Monte Carlo.\n", + "If 'P' may run indefinitely, it's neither.\n", + "Coding Example:\n", + "\n", + "```\n", + "def categorize_algorithm(P):\n", + " if P.correct_result_always() and P.expected_poly_time() and not P.runs_indefinitely():\n", + " return 'Las Vegas'\n", + " elif P.bounded_halt_time() and not P.correct_result_always():\n", + " return 'Monte Carlo'\n", + " else:\n", + " return 'Neither'\n", + "```\n", + "In this coding example, P is an object that has methods to check its properties. The function categorize_algorithm uses these methods to determine the category of the algorithm.\n", + "\n", + "Reflection:\n", + "Creating this problem required an understanding of the nuances between Las Vegas and Monte Carlo algorithms. ChatGPT assisted by providing a clear outline for the problem structure based on the sample provided. One challenge was ensuring the new problem did not simply replicate the example but instead required a deeper understanding of the algorithmic concepts. Through this task, I've learned that problem design in algorithms requires a balance between theoretical concepts and practical applicability to ensure the problem is both challenging and solvable.\n", + "\n", + "The new problem reflects the essence and structure of the sample problem by focusing on the categorization of randomized algorithms, which is central to the example provided.\n", + "\n", + "Problem Clarity and Originality : The problem statement is clear and introduces a new angle to the concept of algorithm categorization, making it original and non-trivial.\n", + "\n", + "Solution Quality and Correctness Proof: The solution is accurate and efficient, with a clear explanation that serves as a proof of correctness.\n", + "\n", + "Reflection Quality : The reflection provides genuine insights into the task and the use of ChatGPT, showcasing an understanding of problem design in algorithms." + ], + "metadata": { + "id": "j_La75qr8vGd" + } + }, + { + "cell_type": "markdown", + "source": [ + "7.Problem Statement:\n", + "Create an algorithm to efficiently compute the Least Common Multiple (LCM) of two numbers using the GCD computed by Euclid's algorithm. Prove that your method is efficient.\n", + "\n", + "Input Format:\n", + "Two non-negative integers, a and b.\n", + "\n", + "Output Format:\n", + "A single integer representing the LCM of a and b.\n", + "\n", + "Sample Input:\n", + "a = 21, b = 6\n", + "\n", + "Sample Output:\n", + "42\n", + "\n", + "Constraints:\n", + "\n", + "0 <= a, b <= 10^9\n", + "The numbers a and b are integers.\n", + "Solution and Justification:\n", + "The LCM of two numbers a and b can be calculated using the GCD of a and b, with the formula lcm(a, b) = (a * b) / gcd(a, b). Since we are already given Euclid's algorithm to compute GCD, which runs in O(log n) time where n is the smaller of the two numbers, we can use this to find the LCM efficiently.\n", + "\n", + "Coding Example:\n", + "```\n", + "def gcd(m, n):\n", + " if n == 0:\n", + " return m\n", + " else:\n", + " return gcd(n, m % n)\n", + "\n", + "def lcm(a, b):\n", + " return (a * b) // gcd(a, b)\n", + "\n", + "# Sample Input\n", + "print(lcm(21, 6)) # Output should be 42\n", + "```\n", + "In this coding example, the gcd function implements Euclid's algorithm, and the lcm function uses it to calculate the Least Common Multiple.\n", + "\n", + "Reflection:\n", + "The task required an understanding of the Euclidean algorithm and its application. I used the existing knowledge of GCD to derive the LCM. A challenge was to keep the problem within the scope of algorithmic complexity and recursion without deviating from the spirit of the original problem. Through this task, I've learned the importance of leveraging known algorithms to solve new problems, ensuring that the solution remains efficient and grounded in well-established mathematical principles.\n", + "\n", + "Scoring Criteria:\n", + "\n", + "Relevance to the Sample Problem : The new problem uses the concept of Euclid's algorithm and extends it to compute the LCM, maintaining relevance to the runtime analysis theme.\n", + "\n", + "Problem Clarity and Originality : The problem statement is clear and introduces an application of the GCD to find the LCM, which is an original problem compared to the sample provided.\n", + "\n", + "Solution Quality and Correctness Proof : The solution is both accurate and efficient, relying on the proven O(log n) complexity of Euclid's algorithm. The use of the GCD to compute LCM is a standard approach and is mathematically sound.\n", + "\n", + "Reflection Quality : The reflection demonstrates an understanding of the problem creation process and insights into the application of Euclid's algorithm for solving related problems." + ], + "metadata": { + "id": "mpA7OKBU-e_6" + } + }, + { + "cell_type": "markdown", + "source": [ + "8.Problem Statement:\n", + "Devise a randomized algorithm to find a maximal matching in a bipartite graph. A matching in a graph is a set of edges without common vertices. A maximal matching is one that cannot be increased by adding another edge.\n", + "\n", + "Input Format:\n", + "A bipartite graph G = (U ∪ V, E) where U and V are disjoint sets of vertices and E is the set of edges connecting vertices from U to V.\n", + "\n", + "Output Format:\n", + "A set M ⊆ E of edges representing a maximal matching.\n", + "\n", + "Sample Input:\n", + "U = {u1, u2, u3}, V = {v1, v2, v3}, E = {(u1,v1), (u1,v2), (u2,v2), (u3,v3)}\n", + "\n", + "Sample Output:\n", + "M = {(u1,v1), (u2,v2)}\n", + "\n", + "Constraints:\n", + "\n", + "The graph is undirected and unweighted.\n", + "The graph does not contain self-loops or multiple edges between the same pair of vertices.\n", + "The number of vertices in U and V does not exceed 10^3.\n", + "Solution and Justification:\n", + "The randomized algorithm will proceed by iterating over each vertex u in U and randomly selecting an edge (u,v) if it's not already part of the matching M and if v is not already matched in M. This process will ensure that we end up with a maximal matching because once an edge is added to M, no other edges sharing the same vertices can be added.\n", + "\n", + "Coding Example:\n", + "```\n", + "import random\n", + "\n", + "def randomized_maximal_matching(U, V, E):\n", + " matching = set()\n", + " vertex_matched = set()\n", + "\n", + " for u in U:\n", + " if u not in vertex_matched:\n", + " possible_edges = [(u, v) for (u, v) in E if v not in vertex_matched]\n", + " if possible_edges:\n", + " selected_edge = random.choice(possible_edges)\n", + " matching.add(selected_edge)\n", + " vertex_matched.add(u)\n", + " vertex_matched.add(selected_edge[1])\n", + " \n", + " return matching\n", + "\n", + "# Sample Input\n", + "U = ['u1', 'u2', 'u3']\n", + "V = ['v1', 'v2', 'v3']\n", + "E = [('u1','v1'), ('u1','v2'), ('u2','v2'), ('u3','v3')]\n", + "print(randomized_maximal_matching(U, V, E))\n", + "```\n", + "Reflection:\n", + "In creating this problem, ChatGPT was used to draft the problem statement and ensure clarity. The main challenge was to maintain the randomized nature of the algorithm while ensuring it would indeed find a maximal matching. This problem deepened my understanding of the principles of randomness in algorithm design, specifically in graph algorithms. It also showcased the importance of balancing between the simplicity of implementation and the complexity of the underlying problem.\n", + "\n", + "Scoring Criteria:\n", + "\n", + "Relevance to the Sample Problem : The new problem is related to the example as it uses a randomized approach to solve a classic graph problem and tests understanding of probability in algorithms.\n", + "\n", + "Problem Clarity and Originality : The problem statement is clear, providing a distinct scenario from the original problem, focusing on matchings rather than independent sets.\n", + "\n", + "Solution Quality and Correctness Proof: The provided solution is simple yet effective. The proof of correctness relies on the properties of maximal matchings in bipartite graphs.\n", + "\n", + "Reflection Quality : The reflection offers genuine insight into the task and the application of randomized algorithms to graph problems, reflecting on the learning process effectively." + ], + "metadata": { + "id": "XEh1OjvR_lZW" + } + }, + { + "cell_type": "markdown", + "source": [ + "9.Problem Statement:\n", + "Design a Hopfield Neural Network to solve the Travelling Salesman Problem (TSP) for a given set of cities and their pairwise distances. The network should converge to a stable state representing a tour with the minimum total distance.\n", + "\n", + "Input Format:\n", + "A symmetric matrix D of dimension n x n, where D[i][j] represents the distance between city i and city j, and D[i][j] = D[j][i].\n", + "\n", + "Output Format:\n", + "A list of city indices representing the order in which the cities are visited in the shortest tour.\n", + "\n", + "Sample Input:\n", + "D = [\n", + "[0, 10, 15, 20],\n", + "[10, 0, 35, 25],\n", + "[15, 35, 0, 30],\n", + "[20, 25, 30, 0]\n", + "]\n", + "\n", + "Sample Output:\n", + "[0, 1, 3, 2]\n", + "\n", + "Constraints:\n", + "\n", + "The distance matrix D is non-negative and symmetric.\n", + "The diagonal elements of D are zero, i.e., D[i][i] = 0.\n", + "The number of cities n does not exceed 10.\n", + "Solution and Justification:\n", + "The Hopfield network used to solve the TSP can be set up with n^2 neurons, where each neuron corresponds to visiting a city at a particular time. The energy function for the TSP is designed such that the minimum energy corresponds to the shortest tour. The update rule for the state of each neuron will be based on minimizing this energy function, and the stable state of the network will represent the solution to the TSP.\n", + "\n", + "Coding Example:\n", + "\n", + "```\n", + "# Pseudocode for the Hopfield Network TSP solver\n", + "Initialize the weight matrix W based on the distance matrix D\n", + "Initialize the state vector S with random values\n", + "while not converged:\n", + " for each neuron i in S:\n", + " Compute the input to neuron i from other neurons and the weight matrix W\n", + " Update the state of neuron i based on the input and an activation function\n", + " Check if the state vector S represents a valid tour\n", + " If valid and the total distance is minimized, return the corresponding tour\n", + "```\n", + "This pseudocode outlines the general approach without specific implementation details, which would be quite extensive for a full Hopfield Network-based TSP solver.\n", + "\n", + "Reflection:\n", + "In creating this new problem, I used the underlying principles of neural networks and optimization from the example provided. One challenge was ensuring the problem is both relevant to the original context and practically solvable within the constraints of a Hopfield network. The task reinforced the importance of clear problem definitions and the careful consideration of solution feasibility.\n", + "\n", + "Scoring Criteria:\n", + "\n", + "Relevance to the Sample Problem : The new problem maintains the essence of using a neural network to find an optimal solution within a given graph structure.\n", + "\n", + "Problem Clarity and Originality : The problem is presented clearly, with distinct parameters and goals, avoiding a simple replication of the example.\n", + "\n", + "Solution Quality and Correctness Proof : While the full implementation is complex, the proposed approach is theoretically sound based on established Hopfield network principles.\n", + "\n", + "Reflection Quality : The reflection demonstrates an understanding of the algorithm design process and the constraints and possibilities offered by neural network-based optimization methods.\n", + "\n" + ], + "metadata": { + "id": "HjUerVORAXa5" + } + }, + { + "cell_type": "markdown", + "source": [ + "\n", + "\n", + "10. Problem Statement:\n", + "Implement the Merge Sort algorithm and analyze its complexity. Given a list of integers, sort them using Merge Sort and establish the recurrence relation for its runtime. Use the Master Theorem to find the complexity of the algorithm.\n", + "\n", + "Input Format:\n", + "A list of n integers.\n", + "\n", + "Output Format:\n", + "The sorted list of n integers.\n", + "\n", + "Sample Input:\n", + "[34, 7, 23, 32, 5, 62]\n", + "\n", + "Sample Output:\n", + "[5, 7, 23, 32, 34, 62]\n", + "\n", + "Constraints:\n", + "\n", + "The list can contain any integer (positive, negative, or zero).\n", + "No size limit is specified for the list, but for practical purposes, it can be assumed to be reasonably small (n <= 10^6).\n", + "Solution and Justification:\n", + "Merge Sort is a divide-and-conquer algorithm that divides the list into two halves, recursively sorts both halves, and then merges the sorted halves. The recurrence relation for the Merge Sort algorithm's runtime is T(n) = 2T(n/2) + Θ(n), where the 2T(n/2) term accounts for the recursive sorting of the two halves, and the Θ(n) term represents the time to merge the sorted halves.\n", + "\n", + "Coding Example:\n", + "```\n", + "def merge_sort(arr):\n", + " if len(arr) > 1:\n", + " mid = len(arr) // 2\n", + " L = arr[:mid]\n", + " R = arr[mid:]\n", + "\n", + " merge_sort(L)\n", + " merge_sort(R)\n", + "\n", + " i = j = k = 0\n", + "\n", + " while i < len(L) and j < len(R):\n", + " if L[i] < R[j]:\n", + " arr[k] = L[i]\n", + " i += 1\n", + " else:\n", + " arr[k] = R[j]\n", + " j += 1\n", + " k += 1\n", + "\n", + " while i < len(L):\n", + " arr[k] = L[i]\n", + " i += 1\n", + " k += 1\n", + "\n", + " while j < len(R):\n", + " arr[k] = R[j]\n", + " j += 1\n", + " k += 1\n", + " return arr\n", + "\n", + "# Sample Input\n", + "print(merge_sort([34, 7, 23, 32, 5, 62]))\n", + "```\n", + "Reflection:\n", + "Creating this problem, I used my understanding of sorting algorithms and complexity analysis. The challenge was to ensure that the new problem was similar in structure but not identical in content to the Quicksort example. This exercise reinforced the concept of divide-and-conquer strategies in algorithm design and the application of the Master Theorem in analyzing algorithm complexity.\n", + "\n", + "Scoring Criteria:\n", + "\n", + "Relevance to the Sample Problem : The new problem is closely related to the original, as it involves sorting, algorithm implementation, and complexity analysis.\n", + "\n", + "Problem Clarity and Originality : The problem statement is clear and original, providing a different context (Merge Sort instead of Quicksort).\n", + "\n", + "Solution Quality and Correctness Proof : The provided solution is accurate and the complexity analysis using the Master Theorem is correct. Merge Sort is well-known for its Θ(n log n) complexity.\n", + "\n", + "Reflection Quality : The reflection offers genuine insight into the task, highlighting the educational value of designing algorithmic problems and the thought process involved in ensuring adherence to educational objectives." + ], + "metadata": { + "id": "CA3KEoGNBImH" + } + }, + { + "cell_type": "markdown", + "source": [ + "11.Problem Statement:\n", + "A game show presents a contestant with a sequence of n closed doors, behind each of which is a prize of a certain value. The values are all distinct positive integers. The contestant does not know the value behind any door. They open the doors one by one in a random order chosen uniformly at random. After opening each door, if the revealed prize is larger than all prizes revealed so far, the contestant wins that prize, and the game ends. If the contestant reaches the last door without winning, they win the prize behind the last door by default. Determine the expected number of doors the contestant opens before winning a prize.\n", + "\n", + "Input Format:\n", + "An array of n distinct integers representing the prize values behind the doors.\n", + "\n", + "Output Format:\n", + "The expected number of doors a contestant opens before winning.\n", + "\n", + "Sample Input:\n", + "[20, 5, 30, 10, 40]\n", + "\n", + "Sample Output:\n", + "2.7\n", + "\n", + "Constraints:\n", + "\n", + "The array of prize values does not contain duplicates.\n", + "The prize values are all positive integers.\n", + "The number of doors n is greater than 1.\n", + "\n", + "\n", + "Coding Example:\n", + "Since a direct coding example to simulate this would involve running many simulations to estimate the expectation, we can provide a function that calculates the harmonic number for the given n.\n", + "\n", + "```\n", + "import math\n", + "\n", + "def expected_doors_opened(n):\n", + " # Calculate the nth harmonic number\n", + " harmonic_number = sum(1/i for i in range(1, n+1))\n", + " return harmonic_number\n", + "\n", + "# Sample Input\n", + "n = 5\n", + "print(expected_doors_opened(n))\n", + "```\n", + "Reflection:\n", + "In creating this problem, I used the auction system's concept of updating a variable based on random events and applied it to a game show context. Ensuring the problem stayed true to the spirit of the example required a clear understanding of expected value in probability. This task taught me the intricacies of problem design in algorithms, especially in crafting a scenario that is both understandable and requires the application of specific mathematical concepts.\n", + "\n", + "Scoring Criteria:\n", + "\n", + "Relevance to the Sample Problem : The new problem tests the understanding of probability, random variables, and expected value, similar to the eBay auction system problem.\n", + "\n", + "Problem Clarity and Originality : The problem statement is clear, and the game show context provides an original scenario that is different from an auction system.\n", + "\n", + "Solution Quality and Correctness Proof : The solution approach is mathematically sound with a well-known approximation for the expected value calculation.\n", + "\n", + "Reflection Quality : The reflection offers a thoughtful overview of the problem creation process, highlighting the application of mathematical concepts in a novel context." + ], + "metadata": { + "id": "dMWKRmoxCHvx" + } + }, + { + "cell_type": "markdown", + "source": [ + "12.Problem Statement:\n", + "Design an algorithm for a ride-sharing company that needs to distribute incoming ride requests to a fleet of available drivers. Suppose that in a typical hour, you get\n", + "\n", + "r ride requests, and there are\n", + "\n", + "d drivers available. Your algorithm assigns each ride request to a driver randomly. Determine the following:\n", + "\n", + "A. What is the expected number of ride requests per driver?\n", + "B. What is the probability that a driver gets at least double the average number of ride requests?\n", + "C. What is the probability that a driver gets no ride requests at all?\n", + "\n", + "Input Format:\n", + "Two integers,\n", + "\n", + "r (number of ride requests) and\n", + "d (number of drivers).\n", + "\n", + "Output Format:\n", + "A. A single number representing the expected number of ride requests per driver.\n", + "B. A probability value for a driver getting at least double the average number of ride requests.\n", + "C. A probability value for a driver getting no ride requests.\n", + "\n", + "Sample Input:\n", + "r = 100 ride requests, d = 25 drivers\n", + "\n", + "Sample Output:\n", + "A. 4 ride requests per driver\n", + "B. Probability for at least double the average: [Calculated Value]\n", + "C. Probability for no ride requests: [Calculated Value]\n", + "\n", + "Constraints:\n", + "\n", + "All ride requests are independent of each other.\n", + "Each driver is equally likely to be assigned a ride request.\n", + "\n", + "```\n", + "from math import exp\n", + "\n", + "def calculate_ride_sharing_statistics(r, d):\n", + " expected_rides_per_driver = r / d\n", + " # Chernoff bound for at least double the average\n", + " probability_double_average = exp(-expected_rides_per_driver / 3)\n", + " # Poisson distribution for no ride requests\n", + " probability_no_rides = exp(-expected_rides_per_driver)\n", + " \n", + " return expected_rides_per_driver, probability_double_average, probability_no_rides\n", + "\n", + "# Sample Input\n", + "r = 100\n", + "d = 25\n", + "print(calculate_ride_sharing_statistics(r, d))\n", + "```\n", + "Reflection:\n", + "The process of creating this problem showed how mathematical concepts like expected value and probability distributions are foundational in various algorithmic settings, from auctions to content delivery networks, to ride-sharing logistics. It reinforced the importance of a thorough understanding of these concepts and their practical implications. The challenge was to ensure the new problem was accessible yet not trivial, and the reflection process helped solidify the reasoning behind the solutions.\n", + "\n", + "Scoring Criteria:\n", + "\n", + "Relevance to the Sample Problem : The new problem is relevant as it also deals with the distribution of tasks (ride requests) and the analysis of load (ride requests per driver).\n", + "\n", + "Problem Clarity and Originality : The problem statement is clear, providing an alternative scenario (ride-sharing) that parallels the server load distribution issue.\n", + "\n", + "Solution Quality and Correctness Proof: The approach is mathematically correct, utilizing established probability theory concepts to estimate expected values and probabilities.\n", + "\n", + "Reflection Quality : The reflection thoughtfully considers the problem creation process and the application of probability theory to algorithm design." + ], + "metadata": { + "id": "20R6w2y2EAyZ" + } + } + ] +} \ No newline at end of file