From bb8cab733a2014973166d8ac4e8cceee18ac5a06 Mon Sep 17 00:00:00 2001 From: saga <42408+saga@users.noreply.github.com> Date: Wed, 15 Oct 2025 01:49:51 +0000 Subject: [PATCH] chore: update roadmap content json --- public/roadmap-content/ai-engineer.json | 194 ++- public/roadmap-content/api-design.json | 63 +- public/roadmap-content/backend.json | 696 +++------- public/roadmap-content/blockchain.json | 16 - public/roadmap-content/computer-science.json | 10 + public/roadmap-content/data-analyst.json | 80 -- public/roadmap-content/devops.json | 99 +- public/roadmap-content/docker.json | 16 - .../roadmap-content/engineering-manager.json | 35 +- public/roadmap-content/frontend.json | 1162 +++-------------- public/roadmap-content/linux.json | 936 +++++++++++++ .../roadmap-content/software-architect.json | 44 +- public/roadmap-content/system-design.json | 4 +- 13 files changed, 1411 insertions(+), 1944 deletions(-) create mode 100644 public/roadmap-content/linux.json diff --git a/public/roadmap-content/ai-engineer.json b/public/roadmap-content/ai-engineer.json index 27c09d679735..78fe9010e37e 100644 --- a/public/roadmap-content/ai-engineer.json +++ b/public/roadmap-content/ai-engineer.json @@ -471,17 +471,6 @@ } ] }, - "5ShWZl1QUqPwO-NRGN85V": { - "title": "OpenAI Models", - "description": "OpenAI provides a variety of models designed for diverse tasks. GPT models like GPT-3 and GPT-4 handle text generation, conversation, and translation, offering context-aware responses, while Codex specializes in generating and debugging code across multiple languages. DALL-E creates images from text descriptions, supporting applications in design and content creation, and Whisper is a speech recognition model that converts spoken language to text for transcription and voice-to-text tasks.\n\nLearn more from the following resources:", - "links": [ - { - "title": "OpenAI Models Overview", - "url": "https://platform.openai.com/docs/models", - "type": "article" - } - ] - }, "zdeuA4GbdBl2DwKgiOA4G": { "title": "OpenAI API", "description": "The OpenAI API provides access to powerful AI models like GPT, Codex, DALL-E, and Whisper, enabling developers to integrate capabilities such as text generation, code assistance, image creation, and speech recognition into their applications via a simple, scalable interface.\n\nLearn more from the following resources:", @@ -1323,21 +1312,10 @@ } ] }, - "lVhWhZGR558O-ljHobxIi": { - "title": "RAG & Implementation", - "description": "Retrieval-Augmented Generation (RAG) combines information retrieval with language generation to produce more accurate, context-aware responses. It uses two components: a retriever, which searches a database to find relevant information, and a generator, which crafts a response based on the retrieved data. Implementing RAG involves using a retrieval model (e.g., embeddings and vector search) alongside a generative language model (like GPT). The process starts by converting a query into embeddings, retrieving relevant documents from a vector database, and feeding them to the language model, which then generates a coherent, informed response. This approach grounds outputs in real-world data, resulting in more reliable and detailed answers.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What is RAG?", - "url": "https://aws.amazon.com/what-is/retrieval-augmented-generation/", - "type": "article" - }, - { - "title": "What is Retrieval-Augmented Generation? 
IBM", - "url": "https://www.youtube.com/watch?v=T-D1OfcDW1M", - "type": "video" - } - ] + "what-are-rags@lVhWhZGR558O-ljHobxIi.md": { + "title": "What are RAGs?", + "description": "", + "links": [] }, "GCn4LGNEtPI0NWYAZCRE-": { "title": "RAG Usecases", @@ -1487,53 +1465,20 @@ } ] }, - "ebXXEhNRROjbbof-Gym4p": { + "langchain@jM-Jbo0wUilhVY830hetJ.md": { "title": "Langchain", - "description": "LangChain is a development framework that simplifies building applications powered by language models, enabling seamless integration of multiple AI models and data sources. It focuses on creating chains, or sequences, of operations where language models can interact with databases, APIs, and other models to perform complex tasks. LangChain offers tools for prompt management, data retrieval, and workflow orchestration, making it easier to develop robust, scalable applications like chatbots, automated data analysis, and multi-step reasoning systems.\n\nLearn more from the following resources:", - "links": [ - { - "title": "LangChain", - "url": "https://www.langchain.com/", - "type": "article" - }, - { - "title": "What is LangChain?", - "url": "https://www.youtube.com/watch?v=1bUy-1hGZpI", - "type": "video" - } - ] + "description": "", + "links": [] }, - "d0ontCII8KI8wfP-8Y45R": { + "llama-index@JT4mBXOjvvrUnynA7yrqt.md": { "title": "Llama Index", - "description": "LlamaIndex, formerly known as GPT Index, is a tool designed to facilitate the integration of large language models (LLMs) with structured and unstructured data sources. It acts as a data framework that helps developers build retrieval-augmented generation (RAG) applications by indexing various types of data, such as documents, databases, and APIs, enabling LLMs to query and retrieve relevant information efficiently.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Llama Index", - "url": "https://docs.llamaindex.ai/en/stable/", - "type": "article" - }, - { - "title": "Introduction to LlamaIndex with Python (2024)", - "url": "https://www.youtube.com/watch?v=cCyYGYyCka4", - "type": "video" - } - ] + "description": "", + "links": [] }, - "eOqCBgBTKM8CmY3nsWjre": { - "title": "Open AI Assistant API", - "description": "The OpenAI Assistant API enables developers to create advanced conversational systems using models like GPT-4. It supports multi-turn conversations, allowing the AI to maintain context across exchanges, which is ideal for chatbots, virtual assistants, and interactive applications. Developers can customize interactions by defining roles, such as system, user, and assistant, to guide the assistant's behavior. 
With features like temperature control, token limits, and stop sequences, the API offers flexibility to ensure responses are relevant, safe, and tailored to specific use cases.\n\nLearn more from the following resources:", - "links": [ - { - "title": "OpenAI Assistants API – Course for Beginners", - "url": "https://www.youtube.com/watch?v=qHPonmSX4Ms", - "type": "course" - }, - { - "title": "Assistants API", - "url": "https://platform.openai.com/docs/assistants/overview", - "type": "article" - } - ] + "open-ai-response-api@eOqCBgBTKM8CmY3nsWjre.md": { + "title": "Open AI Response API", + "description": "", + "links": [] }, "c0RPhpD00VIUgF4HJgN2T": { "title": "Replicate", @@ -1551,26 +1496,10 @@ } ] }, - "AeHkNU-uJ_gBdo5-xdpEu": { + "ai-agents@4_ap0rD9Gl6Ep_4jMfPpG.md": { "title": "AI Agents", - "description": "In AI engineering, \"agents\" refer to autonomous systems or components that can perceive their environment, make decisions, and take actions to achieve specific goals. Agents often interact with external systems, users, or other agents to carry out complex tasks. They can vary in complexity, from simple rule-based bots to sophisticated AI-powered agents that leverage machine learning models, natural language processing, and reinforcement learning.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Building an AI Agent Tutorial - LangChain", - "url": "https://python.langchain.com/docs/tutorials/agents/", - "type": "article" - }, - { - "title": "AI agents and their types", - "url": "https://play.ht/blog/ai-agents-use-cases/", - "type": "article" - }, - { - "title": "The Complete Guide to Building AI Agents for Beginners", - "url": "https://youtu.be/MOyl58VF2ak?si=-QjRD_5y3iViprJX", - "type": "video" - } - ] + "description": "", + "links": [] }, "778HsQzTuJ_3c9OSn5DmH": { "title": "Agents Usecases", @@ -1641,21 +1570,10 @@ } ] }, - "mbp2NoL-VZ5hZIIblNBXt": { - "title": "OpenAI Assistant API", - "description": "The OpenAI Assistant API enables developers to create advanced conversational systems using models like GPT-4. It supports multi-turn conversations, allowing the AI to maintain context across exchanges, which is ideal for chatbots, virtual assistants, and interactive applications. Developers can customize interactions by defining roles, such as system, user, and assistant, to guide the assistant's behavior. 
With features like temperature control, token limits, and stop sequences, the API offers flexibility to ensure responses are relevant, safe, and tailored to specific use cases.\n\nLearn more from the following resources:", - "links": [ - { - "title": "OpenAI Assistants API – Course for Beginners", - "url": "https://www.youtube.com/watch?v=qHPonmSX4Ms", - "type": "course" - }, - { - "title": "Assistants API", - "url": "https://platform.openai.com/docs/assistants/overview", - "type": "article" - } - ] + "openai-response-api@xXLyuUNrKEc32XLQxMjgT.md": { + "title": "OpenAI Response API", + "description": "", + "links": [] }, "W7cKPt_UxcUgwp8J6hS4p": { "title": "Multimodal AI", @@ -1968,5 +1886,75 @@ "type": "article" } ] + }, + "haystack@ebXXEhNRROjbbof-Gym4p.md": { + "title": "Haystack", + "description": "", + "links": [] + }, + "ragflow@d0ontCII8KI8wfP-8Y45R.md": { + "title": "RAGFlow", + "description": "", + "links": [] + }, + "model-context-protocol-mcp@AeHkNU-uJ_gBdo5-xdpEu.md": { + "title": "Model Context Protocol (MCP)", + "description": "", + "links": [] + }, + "mcp-host@MabZ9jOrSj539C5qZrVBd.md": { + "title": "MCP Host", + "description": "", + "links": [] + }, + "mcp-server@8Xkd88EjX3GE_9DWQhr7G.md": { + "title": "MCP Server", + "description": "", + "links": [] + }, + "mcp-client@po0fIZYaFhRbNlza7sB37.md": { + "title": "MCP Client", + "description": "", + "links": [] + }, + "data-layer@Z0920V57_ziDhXbQJMN9O.md": { + "title": "Data Layer", + "description": "", + "links": [] + }, + "transport-layer@o4gHDZ5p9lyeHuCAPvAKz.md": { + "title": "Transport Layer", + "description": "", + "links": [] + }, + "building-an-mcp-server@oLGfKjcqBzJ3vd6Cg-T1B.md": { + "title": "Building an MCP Server", + "description": "", + "links": [] + }, + "building-an-mcp-client@0Rk0rCbmRFJT2GKwUibQS.md": { + "title": "Building an MCP Client", + "description": "", + "links": [] + }, + "connect-to-local-server@H-G93SsEgsA_NGL_v4hPv.md": { + "title": "Connect to Local Server", + "description": "", + "links": [] + }, + "connect-to-remote-server@2t4uINxmzfx8FUF-_i_2B.md": { + "title": "Connect to Remote Server", + "description": "", + "links": [] + }, + "vertex-ai@AxzTGDCC2Ftp4G66U4Uqr.md": { + "title": "Vertex AI", + "description": "", + "links": [] + }, + "google-adk@mbp2NoL-VZ5hZIIblNBXt.md": { + "title": "Google ADK", + "description": "", + "links": [] } } \ No newline at end of file diff --git a/public/roadmap-content/api-design.json b/public/roadmap-content/api-design.json index f4965fb60904..aa8d5ae5be54 100644 --- a/public/roadmap-content/api-design.json +++ b/public/roadmap-content/api-design.json @@ -740,42 +740,15 @@ } ] }, - "wFsbmMi5Ey9UyDADdbdPW": { - "title": "Role Based Access Control (RBAC)", - "description": "Role-Based Access Control (RBAC) is a method of managing authorization in API design that assigns system access to users based on their role within an organization. RBAC is crucial in controlling which endpoints a user can call, and what operations they are allowed to execute. In the context of API design, RBAC ensures appropriate levels of access for different types of users to guarantee data security and integrity. 
It simplifies the process of security administration by assigning privileges based on a user's job function, rather than on an individual basis.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Role-Based Access Control", - "url": "https://auth0.com/docs/manage-users/access-control/rbac", - "type": "article" - }, - { - "title": "What is Role-based Access Control (RBAC)?", - "url": "https://www.redhat.com/en/topics/security/what-is-role-based-access-control", - "type": "article" - }, - { - "title": "Role-based Access Control (RBAC) vs. Attribute-based Access Control (ABAC)", - "url": "https://www.youtube.com/watch?v=rvZ35YW4t5k", - "type": "video" - } - ] + "mac@tl1wXmOaj_zHL2o38VygO.md": { + "title": "MAC", + "description": "", + "links": [] }, - "dZTe_kxIUQsc9N3w920aR": { - "title": "Attribute Based Access Control (ABAC)", - "description": "Attribute Based Access Control (ABAC) is a flexible and powerful authorization method in the realm of API Design. Distinct from Role-Based Access Control (RBAC), which relies on predefined roles and permissions, ABAC uses attributes to build policies and make decisions. These attributes can be associated with the user, the action they want to perform, targeted resources, or the environment. With ABAC, finer-grained access control can be achieved, thereby improving the security and efficiency of APIs. This approach is widely used in complex and dynamic environments where access control requirements can be multifaceted and deeply context-dependent.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What is Attribute Based Access Control?", - "url": "https://www.okta.com/uk/blog/2020/09/attribute-based-access-control-abac/", - "type": "article" - }, - { - "title": "Attribute Based Access Control", - "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/introduction_attribute-based-access-control.html", - "type": "article" - } - ] + "rebac@CCcY8UsGdd2pdBYHt9L4o.md": { + "title": "ReBAC", + "description": "", + "links": [] }, "tzUJwXu_scwQHnPPT0oY-": { "title": "API Keys & Management", @@ -1596,5 +1569,25 @@ "type": "video" } ] + }, + "dac@_BXgYUlaYfpYrryXTw5n2.md": { + "title": "DAC", + "description": "", + "links": [] + }, + "abac@dZTe_kxIUQsc9N3w920aR.md": { + "title": "ABAC", + "description": "", + "links": [] + }, + "pbac@nJWtUyn9bljh3T-q_adJK.md": { + "title": "PBAC", + "description": "", + "links": [] + }, + "rbac@wFsbmMi5Ey9UyDADdbdPW.md": { + "title": "RBAC", + "description": "", + "links": [] } } \ No newline at end of file diff --git a/public/roadmap-content/backend.json b/public/roadmap-content/backend.json index db629712a0ab..982ead2a829b 100644 --- a/public/roadmap-content/backend.json +++ b/public/roadmap-content/backend.json @@ -1,27 +1,37 @@ { - "gKTSe9yQFVbPVlLzWB0hC": { - "title": "Search Engines", - "description": "Search engines like Elasticsearch are specialized tools designed for fast, scalable, and flexible searching and analyzing of large volumes of data. Elasticsearch is an open-source, distributed search and analytics engine built on Apache Lucene, offering full-text search capabilities, real-time indexing, and advanced querying features. Key characteristics of search engines like Elasticsearch include:\n\n1. **Full-Text Search**: Support for complex search queries, including relevance scoring and text analysis.\n2. **Distributed Architecture**: Scalability through horizontal distribution across multiple nodes or servers.\n3. 
**Real-Time Indexing**: Ability to index and search data almost instantaneously.\n4. **Powerful Query DSL**: A domain-specific language for constructing and executing sophisticated queries.\n5. **Analytics**: Capabilities for aggregating and analyzing data, often used for log and event data analysis.\n\nVisit the following resources to learn more:", + "9cD5ag1L0GqHx4_zxc5JX": { + "title": "Open API Specs", + "description": "The OpenAPI Specification (OAS), formerly known as Swagger, is a standard for defining and documenting RESTful APIs. It provides a structured format in YAML or JSON to describe API endpoints, request and response formats, authentication methods, and other metadata. By using OAS, developers can create a comprehensive and machine-readable API description that facilitates client generation, automated documentation, and testing. This specification promotes consistency and clarity in API design, enhances interoperability between different systems, and enables tools to generate client libraries, server stubs, and interactive API documentation.\n\nVisit the following resources to learn more:", "links": [ { - "title": "Elasticsearch", - "url": "https://www.elastic.co/elasticsearch/", + "title": "OpenAPI Specification Website", + "url": "https://swagger.io/specification/", + "type": "article" + }, + { + "title": "Open API Live Editor", + "url": "https://swagger.io/tools/swagger-editor/", "type": "article" + }, + { + "title": "OpenAPI 3.0: How to Design and Document APIs with the Latest OpenAPI Specification 3.0", + "url": "https://www.youtube.com/watch?v=6kwmW_p_Tig", + "type": "video" + }, + { + "title": " REST API and OpenAPI: It’s Not an Either/Or Question", + "url": "https://www.youtube.com/watch?v=pRS9LRBgjYg", + "type": "video" } ] }, - "9Fpoor-Os_9lvrwu5Zjh-": { - "title": "Design and Development Principles", - "description": "Design and Development Principles are fundamental guidelines that inform the creation of software systems. Key principles include:\n\n* SOLID (Single Responsibility, Open-Closed, Liskov Substitution, Interface Segregation, Dependency Inversion)\n* DRY (Don't Repeat Yourself)\n* KISS (Keep It Simple, Stupid)\n* YAGNI (You Aren't Gonna Need It)\n* Separation of Concerns\n* Modularity\n* Encapsulation\n* Composition over Inheritance\n* Loose Coupling and High Cohesion\n* Principle of Least Astonishment\n\nVisit the following resources to learn more:", + "gKTSe9yQFVbPVlLzWB0hC": { + "title": "Search Engines", + "description": "Search engines like Elasticsearch are specialized tools designed for fast, scalable, and flexible searching and analyzing of large volumes of data. Elasticsearch is an open-source, distributed search and analytics engine built on Apache Lucene, offering full-text search capabilities, real-time indexing, and advanced querying features. Key characteristics of search engines like Elasticsearch include:\n\n1. **Full-Text Search**: Support for complex search queries, including relevance scoring and text analysis.\n2. **Distributed Architecture**: Scalability through horizontal distribution across multiple nodes or servers.\n3. **Real-Time Indexing**: Ability to index and search data almost instantaneously.\n4. **Powerful Query DSL**: A domain-specific language for constructing and executing sophisticated queries.\n5. 
**Analytics**: Capabilities for aggregating and analyzing data, often used for log and event data analysis.\n\nVisit the following resources to learn more:", "links": [ { - "title": "Design Principles - Wikipedia", - "url": "https://en.wikipedia.org/wiki/Design_principles", - "type": "article" - }, - { - "title": "Design Principles - Microsoft", - "url": "https://docs.microsoft.com/en-us/dotnet/standard/design-guidelines/index", + "title": "Elasticsearch", + "url": "https://www.elastic.co/elasticsearch/", "type": "article" } ] @@ -351,9 +361,9 @@ } ] }, - "2f0ZO6GJElfZ2Eis28Hzg": { - "title": "Pick a Language", - "description": "Even if you’re a beginner, the first thing you’ll learn is that web development is primarily divided into two aspects: Frontend Development and Backend Development, each with its unique tools and technologies. For Frontend Development, foundational technologies include HTML, CSS, and JavaScript. Similarly, Backend Development revolves around server-side programming languages like Python, Java, or Node.js, complemented by databases, frameworks, and web servers that enable website functionality.", + "pick-a-backend-language@2f0ZO6GJElfZ2Eis28Hzg.md": { + "title": "Pick a Backend Language", + "description": "", "links": [] }, "_I1E__wCIVrhjMk6IMieE": { @@ -464,37 +474,6 @@ } ] }, - "Ry_5Y-BK7HrkIc6X0JG1m": { - "title": "Bitbucket", - "description": "Bitbucket is a web-based version control repository hosting service owned by Atlassian. It primarily uses Git version control systems, offering both cloud-hosted and self-hosted options. Bitbucket provides features such as pull requests for code review, branch permissions, and inline commenting on code. It integrates seamlessly with other Atlassian products like Jira and Trello, making it popular among teams already using Atlassian tools. Bitbucket supports continuous integration and deployment through Bitbucket Pipelines. It offers unlimited private repositories for small teams, making it cost-effective for smaller organizations.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Bitbucket", - "url": "https://bitbucket.org/product", - "type": "article" - }, - { - "title": "Overview of Bitbucket", - "url": "https://bitbucket.org/product/guides/getting-started/overview#a-brief-overview-of-bitbucket", - "type": "article" - }, - { - "title": "Using Git with Bitbucket Cloud", - "url": "https://www.atlassian.com/git/tutorials/learn-git-with-bitbucket-cloud", - "type": "article" - }, - { - "title": "Explore top posts about Bitbucket", - "url": "https://app.daily.dev/tags/bitbucket?ref=roadmapsh", - "type": "article" - }, - { - "title": "Bitbucket tutorial | How to use Bitbucket Cloud", - "url": "https://www.youtube.com/watch?v=M44nEyd_5To", - "type": "video" - } - ] - }, "Wcp-VDdFHipwa7hNAp1z_": { "title": "GitLab", "description": "GitLab is a web-based DevOps platform that provides a complete solution for the software development lifecycle. It offers source code management, continuous integration/continuous deployment (CI/CD), issue tracking, and more, all integrated into a single application. GitLab supports Git repositories and includes features like merge requests (similar to GitHub's pull requests), wiki pages, and issue boards. It emphasizes DevOps practices, providing built-in CI/CD pipelines, container registry, and Kubernetes integration. GitLab offers both cloud-hosted and self-hosted options, giving organizations flexibility in deployment. 
Its all-in-one approach differentiates it from competitors, as it includes features that might require multiple tools in other ecosystems. GitLab's focus on the entire DevOps lifecycle, from planning to monitoring, makes it popular among enterprises and teams seeking a unified platform for their development workflows.\n\nVisit the following resources to learn more:", @@ -854,21 +833,10 @@ } ] }, - "SYXJhanu0lFmGj2m2XXhS": { - "title": "Profiling Perfor.", - "description": "Profiling performance involves analyzing a system or application's behavior to identify bottlenecks, inefficiencies, and areas for optimization. This process typically involves collecting detailed information about resource usage, such as CPU and memory consumption, I/O operations, and execution time of functions or methods. Profiling tools can provide insights into how different parts of the code contribute to overall performance, highlighting slow or resource-intensive operations. By understanding these performance characteristics, developers can make targeted improvements, optimize code paths, and enhance system responsiveness and scalability. Profiling is essential for diagnosing performance issues and ensuring that applications meet desired performance standards.\n\nLearn more from the following resources:", - "links": [ - { - "title": "How to Profile SQL Queries for Better Performance", - "url": "https://servebolt.com/articles/profiling-sql-queries/", - "type": "article" - }, - { - "title": "Performance Profiling", - "url": "https://www.youtube.com/watch?v=MaauQTeGg2k", - "type": "video" - } - ] + "profiling-performance@SYXJhanu0lFmGj2m2XXhS.md": { + "title": "Profiling Performance", + "description": "", + "links": [] }, "bQnOAu863hsHdyNMNyJop": { "title": "N+1 Problem", @@ -1022,22 +990,6 @@ } ] }, - "dLY0KafPstajCcSbslC4M": { - "title": "HATEOAS", - "description": "HATEOAS (Hypermedia As The Engine Of Application State) is a constraint of RESTful architecture that allows clients to navigate an API dynamically through hypermedia links provided in responses. Instead of hard-coding URLs or endpoints, the client discovers available actions through these links, much like a web browser following links on a webpage. This enables greater flexibility and decouples clients from server-side changes, making the system more adaptable and scalable without breaking existing clients. It's a key element of REST's principle of statelessness and self-descriptive messages.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What is HATEOAS and why is it important for my REST API?", - "url": "https://restcookbook.com/Basics/hateoas/", - "type": "article" - }, - { - "title": "What happened to HATEOAS", - "url": "https://www.youtube.com/watch?v=HNTSrytKCoQ", - "type": "video" - } - ] - }, "sNceS4MpSIjRkWhNDmrFg": { "title": "JSON APIs", "description": "JSON or JavaScript Object Notation is an encoding scheme that is designed to eliminate the need for an ad-hoc code for each application to communicate with servers that communicate in a defined way. JSON API module exposes an implementation for data stores and data structures, such as entity types, bundles, and fields.\n\nVisit the following resources to learn more:", @@ -1059,32 +1011,6 @@ } ] }, - "9cD5ag1L0GqHx4_zxc5JX": { - "title": "Open API Specs", - "description": "The OpenAPI Specification (OAS), formerly known as Swagger, is a standard for defining and documenting RESTful APIs. 
It provides a structured format in YAML or JSON to describe API endpoints, request and response formats, authentication methods, and other metadata. By using OAS, developers can create a comprehensive and machine-readable API description that facilitates client generation, automated documentation, and testing. This specification promotes consistency and clarity in API design, enhances interoperability between different systems, and enables tools to generate client libraries, server stubs, and interactive API documentation.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "OpenAPI Specification Website", - "url": "https://swagger.io/specification/", - "type": "article" - }, - { - "title": "Open API Live Editor", - "url": "https://swagger.io/tools/swagger-editor/", - "type": "article" - }, - { - "title": "OpenAPI 3.0: How to Design and Document APIs with the Latest OpenAPI Specification 3.0", - "url": "https://www.youtube.com/watch?v=6kwmW_p_Tig", - "type": "video" - }, - { - "title": " REST API and OpenAPI: It’s Not an Either/Or Question", - "url": "https://www.youtube.com/watch?v=pRS9LRBgjYg", - "type": "video" - } - ] - }, "sSNf93azjuyMzQqIHE0Rh": { "title": "SOAP", "description": "Simple Object Access Protocol (SOAP) is a message protocol for exchanging information between systems and applications. When it comes to application programming interfaces (APIs), a SOAP API is developed in a more structured and formalized way. SOAP messages can be carried over a variety of lower-level protocols, including the web-related Hypertext Transfer Protocol (HTTP).\n\nVisit the following resources to learn more:", @@ -1184,89 +1110,15 @@ } ] }, - "KWTbEVX_WxS8jmSaAX3Fe": { - "title": "Client Side", - "description": "Client-side caching is a technique where web browsers or applications store data locally on the user's device to improve performance and reduce server load. It involves saving copies of web pages, images, scripts, and other resources on the client's system for faster access on subsequent visits. Modern browsers implement various caching mechanisms, including HTTP caching (using headers like Cache-Control and ETag), service workers for offline functionality, and local storage APIs. Client-side caching significantly reduces network traffic and load times, enhancing user experience, especially on slower connections. However, it requires careful management to balance improved performance with the need for up-to-date content. Developers must implement appropriate cache invalidation strategies and consider cache-busting techniques for critical updates. Effective client-side caching is crucial for creating responsive, efficient web applications while minimizing server resource usage.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Client Side Caching", - "url": "https://redis.io/docs/latest/develop/use/client-side-caching/", - "type": "article" - }, - { - "title": "Everything you need to know about HTTP Caching", - "url": "https://www.youtube.com/watch?v=HiBDZgTNpXY", - "type": "video" - } - ] - }, - "Nq2BO53bHJdFT1rGZPjYx": { - "title": "CDN", - "description": "A Content Delivery Network (CDN) service aims to provide high availability and performance improvements of websites. 
This is achieved with fast delivery of website assets and content typically via geographically closer endpoints to the client requests.\n\nTraditional commercial CDNs (Amazon CloudFront, Akamai, CloudFlare and Fastly) provide servers across the globe which can be used for this purpose. Serving assets and contents via a CDN reduces bandwidth on website hosting, provides an extra layer of caching to reduce potential outages and can improve website security as well\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "CloudFlare - What is a CDN? | How do CDNs work?", - "url": "https://www.cloudflare.com/en-ca/learning/cdn/what-is-a-cdn/", - "type": "article" - }, - { - "title": "AWS - CDN", - "url": "https://aws.amazon.com/what-is/cdn/", - "type": "article" - }, - { - "title": "What is Cloud CDN?", - "url": "https://www.youtube.com/watch?v=841kyd_mfH0", - "type": "video" - }, - { - "title": "What is a CDN and how does it work?", - "url": "https://www.youtube.com/watch?v=RI9np1LWzqw", - "type": "video" - } - ] - }, - "z1-eP4sV75GBEIdM4NvL9": { - "title": "Server Side", - "description": "Server-side caching is a technique used to improve application performance by storing frequently accessed data in memory on the server, reducing the need for repeated data retrieval or computation. This approach helps to speed up response times and reduce the load on databases and other backend services. Common methods include caching database query results, HTML fragments, and API responses. Popular server-side caching tools and technologies include Redis, Memcached, and built-in caching mechanisms in web frameworks. By efficiently managing and serving cached content, server-side caching enhances scalability and responsiveness of applications.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Server-side caching and Client-side caching", - "url": "https://www.codingninjas.com/codestudio/library/server-side-caching-and-client-side-caching", - "type": "article" - }, - { - "title": "Caching strategies", - "url": "https://medium.com/@genchilu/cache-strategy-in-backend-d0baaacd2d79", - "type": "article" - }, - { - "title": "Local vs distributed", - "url": "https://redis.io/glossary/distributed-caching/", - "type": "article" - }, - { - "title": "Explore top posts about Web Development", - "url": "https://app.daily.dev/tags/webdev?ref=roadmapsh", - "type": "article" - } - ] + "http-caching@KWTbEVX_WxS8jmSaAX3Fe.md": { + "title": "HTTP Caching", + "description": "", + "links": [] }, - "ELj8af7Mi38kUbaPJfCUR": { + "caching@uPjCrDGA2MHylWXbZvMBM.md": { "title": "Caching", - "description": "Caching is a technique used in computing to store and retrieve frequently accessed data quickly, reducing the need to fetch it from the original, slower source repeatedly. It involves keeping a copy of data in a location that's faster to access than its primary storage. Caching can occur at various levels, including browser caching, application-level caching, and database caching. It significantly improves performance by reducing latency, decreasing network traffic, and lowering the load on servers or databases. Common caching strategies include time-based expiration, least recently used (LRU) algorithms, and write-through or write-back policies. While caching enhances speed and efficiency, it also introduces challenges in maintaining data consistency and freshness. 
Effective cache management is crucial in balancing performance gains with the need for up-to-date information in dynamic systems.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "What is Caching - AWS", - "url": "https://aws.amazon.com/caching/", - "type": "article" - }, - { - "title": "Caching - Cloudflare", - "url": "https://www.cloudflare.com/learning/cdn/what-is-caching/", - "type": "article" - } - ] + "description": "", + "links": [] }, "RBrIP5KbVQ2F0ly7kMfTo": { "title": "Web Security", @@ -1424,193 +1276,10 @@ } ] }, - "6XIWO0MoE-ySl4qh_ihXa": { - "title": "GOF Design Patterns", - "description": "The Gang of Four (GoF) Design Patterns are a collection of 23 foundational software design patterns that provide solutions to common object-oriented design problems. These patterns are grouped into three categories: _Creational_ (focused on object creation like Singleton and Factory), _Structural_ (focused on class and object composition like Adapter and Composite), and _Behavioral_ (focused on communication between objects like Observer and Strategy). Each pattern offers a proven template for addressing specific design challenges, promoting code reusability, flexibility, and maintainability across software systems.\n\nLearn more from the following links:", - "links": [ - { - "title": "Gangs of Four (GoF) Design Patterns", - "url": "https://www.digitalocean.com/community/tutorials/gangs-of-four-gof-design-patterns", - "type": "article" - }, - { - "title": "Design Patterns for Humans", - "url": "https://github.com/kamranahmedse/design-patterns-for-humans", - "type": "article" - }, - { - "title": "Tutorial - Builder Pattern (Gang of Four Design Patterns Series)", - "url": "https://www.youtube.com/watch?v=_sa2WlAFWQos", - "type": "video" - } - ] - }, - "u8IRw5PuXGUcmxA0YYXgx": { - "title": "CQRS", - "description": "CQRS (Command Query Responsibility Segregation) is an architectural pattern that separates read and write operations for a data store. In this pattern, \"commands\" handle data modification (create, update, delete), while \"queries\" handle data retrieval. The principle behind CQRS is that for many systems, especially complex ones, the requirements for reading data differ significantly from those for writing data. By separating these concerns, CQRS allows for independent scaling, optimization, and evolution of the read and write sides. This can lead to improved performance, scalability, and security. CQRS is often used in event-sourced systems and can be particularly beneficial in high-performance, complex domain applications. However, it also introduces additional complexity and should be applied judiciously based on the specific needs and constraints of the system.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "CQRS Pattern", - "url": "https://docs.microsoft.com/en-us/azure/architecture/patterns/cqrs", - "type": "article" - }, - { - "title": "Learn CQRS Pattern in 5 minutes!", - "url": "https://www.youtube.com/watch?v=eiut3FIY1Cg", - "type": "video" - } - ] - }, - "BvHi5obg0L1JDZFKBzx9t": { - "title": "Domain Driven Design", - "description": "Domain-Driven Design (DDD) is a software development approach that focuses on creating a deep understanding of the business domain and using this knowledge to inform the design of software systems. 
It emphasizes close collaboration between technical and domain experts to develop a shared language (ubiquitous language) and model that accurately represents the core concepts and processes of the business. DDD promotes organizing code around business concepts (bounded contexts), using rich domain models to encapsulate business logic, and separating the domain logic from infrastructure concerns. Key patterns in DDD include entities, value objects, aggregates, repositories, and domain services. This approach aims to create more maintainable and flexible software systems that closely align with business needs and can evolve with changing requirements. DDD is particularly valuable for complex domains where traditional CRUD-based architectures may fall short in capturing the nuances and rules of the business.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Domain-Driven Design", - "url": "https://redis.com/glossary/domain-driven-design-ddd/", - "type": "article" - }, - { - "title": "Explore top posts about Domain-Driven Design", - "url": "https://app.daily.dev/tags/domain-driven-design?ref=roadmapsh", - "type": "article" - }, - { - "title": "Domain Driven Design: What You Need To Know", - "url": "https://www.youtube.com/watch?v=4rhzdZIDX_k", - "type": "video" - } - ] - }, - "wqE-mkxvehOzOv8UyE39p": { - "title": "Event Sourcing", - "description": "Event sourcing is a design pattern in which the state of a system is represented as a sequence of events that have occurred over time. In an event-sourced system, changes to the state of the system are recorded as events and stored in an event store. The current state of the system is derived by replaying the events from the event store. One of the main benefits of event sourcing is that it provides a clear and auditable history of all the changes that have occurred in the system. This can be useful for debugging and for tracking the evolution of the system over time.Event sourcing is often used in conjunction with other patterns, such as Command Query Responsibility Segregation (CQRS) and domain-driven design, to build scalable and responsive systems with complex business logic. It is also useful for building systems that need to support undo/redo functionality or that need to integrate with external systems.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Event Sourcing - Martin Fowler", - "url": "https://martinfowler.com/eaaDev/EventSourcing.html", - "type": "article" - }, - { - "title": "Explore top posts about Architecture", - "url": "https://app.daily.dev/tags/architecture?ref=roadmapsh", - "type": "article" - }, - { - "title": "Event Sourcing 101", - "url": "https://www.youtube.com/watch?v=lg6aF5PP4Tc", - "type": "video" - } - ] - }, - "I-PUAE2AzbEaUkW9vMaUM": { - "title": "Test Driven Development", - "description": "Test driven development (TDD) is the process of writing tests for software's requirements which will fail until the software is developed to meet those requirements. Once those tests pass, then the cycle repeats to refactor code or develop another feature/requirement. 
In theory, this ensures that software is written to meet requirements in the simplest form, and avoids code defects.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "What is Test Driven Development (TDD)?", - "url": "https://www.guru99.com/test-driven-development.html", - "type": "article" - }, - { - "title": "Test-driven development", - "url": "https://www.ibm.com/garage/method/practices/code/practice_test_driven_development/", - "type": "article" - }, - { - "title": "Explore top posts about TDD", - "url": "https://app.daily.dev/tags/tdd?ref=roadmapsh", - "type": "article" - }, - { - "title": "Test-Driven Development", - "url": "https://www.youtube.com/watch?v=Jv2uxzhPFl4", - "type": "video" - } - ] - }, - "Ke522R-4k6TDeiDRyZbbU": { - "title": "Monolithic Apps", - "description": "Monolithic applications are designed as a single, cohesive unit where all components—such as user interface, business logic, and data access—are tightly integrated and run as a single service. This architecture simplifies development and deployment since the entire application is managed and deployed together. However, it can lead to challenges with scalability, maintainability, and agility as the application grows. Changes to one part of the application may require redeploying the entire system, and scaling might necessitate duplicating the entire application rather than scaling individual components. Monolithic architectures can be suitable for smaller applications or projects with less complex requirements, but many organizations transition to microservices or modular architectures to address these limitations as they scale.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Pattern: Monolithic Architecture", - "url": "https://microservices.io/patterns/monolithic.html", - "type": "article" - }, - { - "title": "Monolithic Architecture - Advantages & Disadvantages", - "url": "https://datamify.medium.com/monolithic-architecture-advantages-and-disadvantages-e71a603eec89", - "type": "article" - }, - { - "title": "Monolithic vs Microservice Architecture", - "url": "https://www.youtube.com/watch?v=NdeTGlZ__Do", - "type": "video" - } - ] - }, - "nkmIv3dNwre4yrULMgTh3": { - "title": "Serverless", - "description": "Serverless computing is a cloud computing model where developers build and run applications without managing server infrastructure. In this model, cloud providers handle the server management, scaling, and maintenance tasks. Developers deploy code in the form of functions, which are executed in response to events or triggers, and are billed based on the actual usage rather than reserved capacity. This approach simplifies development by abstracting infrastructure concerns, enabling automatic scaling, and reducing operational overhead. 
Common serverless platforms include AWS Lambda, Google Cloud Functions, and Azure Functions, which support a range of event-driven applications and microservices.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Serverless", - "url": "https://www.ibm.com/cloud/learn/serverless", - "type": "article" - }, - { - "title": "AWS Services", - "url": "https://aws.amazon.com/serverless/", - "type": "article" - }, - { - "title": "Explore top posts about Serverless", - "url": "https://app.daily.dev/tags/serverless?ref=roadmapsh", - "type": "article" - }, - { - "title": "Serverless Computing in 100 Seconds", - "url": "https://www.youtube.com/watch?v=W_VV2Fx32_Y&ab_channel=Fireship", - "type": "video" - } - ] - }, - "K55h3aqOGe6-hgVhiFisT": { - "title": "Microservices", - "description": "Microservices is an architectural style that structures an application as a collection of loosely coupled, independently deployable services. Each microservice focuses on a specific business capability and communicates with others via lightweight protocols, typically HTTP or messaging queues. This approach allows for greater scalability, flexibility, and resilience, as services can be developed, deployed, and scaled independently. Microservices also facilitate the use of diverse technologies and languages for different components, and they support continuous delivery and deployment. However, managing microservices involves complexity in terms of inter-service communication, data consistency, and deployment orchestration.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Pattern: Microservice Architecture", - "url": "https://microservices.io/patterns/microservices.html", - "type": "article" - }, - { - "title": "What is Microservices?", - "url": "https://smartbear.com/solutions/microservices/", - "type": "article" - }, - { - "title": "Microservices 101", - "url": "https://thenewstack.io/microservices-101/", - "type": "article" - }, - { - "title": "Articles about Microservices", - "url": "https://thenewstack.io/category/microservices/", - "type": "article" - }, - { - "title": "Explore top posts about Microservices", - "url": "https://app.daily.dev/tags/microservices?ref=roadmapsh", - "type": "article" - }, - { - "title": "Microservices explained in 5 minutes", - "url": "https://www.youtube.com/watch?v=lL_j7ilk7rc", - "type": "video" - } - ] + "monolith@Ke522R-4k6TDeiDRyZbbU.md": { + "title": "Monolith", + "description": "", + "links": [] }, "n14b7sfTOwsjKTpFC9EZ2": { "title": "Service Mesh", @@ -1810,27 +1479,6 @@ } ] }, - "SGVwJme-jT_pbOTvems0v": { - "title": "Containerization vs Virtualization", - "description": "Containerization and virtualization are both technologies for isolating and running multiple applications on shared hardware, but they differ significantly in approach and resource usage. Virtualization creates separate virtual machines (VMs), each with its own operating system, running on a hypervisor. This provides strong isolation but consumes more resources. Containerization, exemplified by Docker, uses a shared operating system kernel to create isolated environments (containers) for applications. Containers are lighter, start faster, and use fewer resources than VMs. They're ideal for microservices architectures and rapid deployment. Virtualization offers better security isolation and is suitable for running different operating systems on the same hardware. 
Containerization provides greater efficiency and scalability, especially for cloud-native applications. The choice between them depends on specific use cases, security requirements, and infrastructure needs.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Containerization vs. Virtualization: Everything you need to know", - "url": "https://middleware.io/blog/containerization-vs-virtualization/", - "type": "article" - }, - { - "title": "Explore top posts about Containers", - "url": "https://app.daily.dev/tags/containers?ref=roadmapsh", - "type": "article" - }, - { - "title": "Virtual Machine (VM) vs Docker", - "url": "https://www.youtube.com/watch?v=a1M_thDTqmU", - "type": "video" - } - ] - }, "sVuIdAe08IWJVqAt4z-ag": { "title": "WebSockets", "description": "WebSockets provide a protocol for full-duplex, real-time communication between a client (usually a web browser) and a server over a single, long-lived connection. Unlike traditional HTTP, which requires multiple request-response cycles to exchange data, WebSockets establish a persistent connection that allows for continuous data exchange in both directions. This enables efficient real-time interactions, such as live chat, online gaming, and real-time updates on web pages. WebSocket connections start with an HTTP handshake, then upgrade to a WebSocket protocol, facilitating low-latency communication and reducing overhead compared to HTTP polling or long polling.\n\nVisit the following resources to learn more:", @@ -1977,26 +1625,10 @@ } ] }, - "fekyMpEnaGqjh1Cu4Nyc4": { - "title": "Web Servers", - "description": "Web servers are software or hardware systems that handle requests from clients (usually web browsers) and serve web content, such as HTML pages, images, and other resources. They process incoming HTTP or HTTPS requests, interact with application servers or databases if needed, and send the appropriate response back to the client. Popular web servers include Apache HTTP Server, Nginx, and Microsoft Internet Information Services (IIS). Web servers are essential for hosting websites and web applications, managing traffic, and ensuring reliable access to online resources by handling concurrent connections, serving static and dynamic content, and providing security features like SSL/TLS encryption.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "What is a Web Server? - Mozilla", - "url": "https://developer.mozilla.org/en-US/docs/Learn/Common_questions/What_is_a_web_server", - "type": "article" - }, - { - "title": "What is a Web Server?", - "url": "https://www.hostinger.co.uk/tutorials/what-is-a-web-server", - "type": "article" - }, - { - "title": "Web Server Concepts and Examples", - "url": "https://youtu.be/9J1nJOivdyw", - "type": "video" - } - ] + "learn-about-web-servers@fekyMpEnaGqjh1Cu4Nyc4.md": { + "title": "Learn about Web Servers", + "description": "", + "links": [] }, "SHmbcMRsc3SygEDksJQBD": { "title": "Building For Scale", @@ -2254,43 +1886,6 @@ } ] }, - "f7iWBkC0X7yyCoP_YubVd": { - "title": "Migration Strategies", - "description": "Migration strategies involve planning and executing the transition of applications, data, or infrastructure from one environment to another, such as from on-premises systems to the cloud or between different cloud providers. Key strategies include:\n\n1. **Rehost (Lift and Shift)**: Moving applications as-is to the new environment with minimal changes, which is often the quickest but may not fully leverage new platform benefits.\n2. 
**Replatform**: Making some optimizations or changes to adapt applications for the new environment, enhancing performance or scalability while retaining most of the existing architecture.\n3. **Refactor**: Redesigning and modifying applications to optimize for the new environment, often taking advantage of new features and improving functionality or performance.\n4. **Repurchase**: Replacing existing applications with new, often cloud-based, solutions that better meet current needs.\n5. **Retain**: Keeping certain applications or systems in their current environment due to specific constraints or requirements.\n6. **Retire**: Decommissioning applications that are no longer needed or are redundant.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Databases as a Challenge for Continuous Delivery", - "url": "https://phauer.com/2015/databases-challenge-continuous-delivery/", - "type": "article" - }, - { - "title": "AWS Cloud Migration Strategies", - "url": "https://www.youtube.com/watch?v=9ziB82V7qVM", - "type": "video" - } - ] - }, - "osQlGGy38xMcKLtgZtWaZ": { - "title": "Types of Scaling", - "description": "Horizontal scaling (scaling out/in) involves adding or removing instances of resources, such as servers or containers, to handle increased or decreased loads. It distributes the workload across multiple instances to improve performance and redundancy. This method enhances the system's capacity by expanding the number of nodes in a distributed system.\n\nVertical scaling (scaling up/down) involves increasing or decreasing the resources (CPU, memory, storage) of a single instance or server to handle more load or reduce capacity. This method improves performance by upgrading the existing hardware or virtual machine but has limits based on the maximum capacity of the individual resource.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Horizontal vs Vertical Scaling", - "url": "https://touchstonesecurity.com/horizontal-vs-vertical-scaling-what-you-need-to-know/", - "type": "article" - }, - { - "title": "Vertical Vs Horizontal Scaling: Key Differences You Should Know", - "url": "https://www.youtube.com/watch?v=dvRFHG2-uYs", - "type": "video" - }, - { - "title": "System Design 101", - "url": "https://www.youtube.com/watch?v=Y-Gl4HEyeUQ", - "type": "video" - } - ] - }, "4X-sbqpP0NDhM99bKdqIa": { "title": "Instrumentation", "description": "Instrumentation, monitoring, and telemetry are critical components for ensuring system reliability and performance. _Instrumentation_ refers to embedding code or tools within applications to capture key metrics, logs, and traces. _Monitoring_ involves observing these metrics in real time to detect anomalies, failures, or performance issues, often using dashboards and alerting systems. _Telemetry_ is the automated collection and transmission of this data from distributed systems, enabling visibility into system behavior. Together, these practices provide insights into the health, usage, and performance of systems, aiding in proactive issue resolution and optimizing overall system efficiency.\n\nVisit the following resources to learn more:", @@ -3056,37 +2651,10 @@ } ] }, - "osvajAJlwGI3XnX0fE-kA": { - "title": "Long Polling", - "description": "Long polling is a technique where the client polls the server for new data. 
However, if the server does not have any data available for the client, instead of sending an empty response, the server holds the request and waits for some specified period of time for new data to be available. If new data becomes available during that time, the server immediately sends a response to the client, completing the open request. If no new data becomes available and the timeout period specified by the client expires, the server sends a response indicating that fact. The client will then immediately re-request data from the server, creating a new request-response cycle.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Long Polling", - "url": "https://javascript.info/long-polling", - "type": "article" - }, - { - "title": "What is Long Polling?", - "url": "https://www.youtube.com/watch?v=LD0_-uIsnOE", - "type": "video" - } - ] - }, - "Tt7yr-ChHncJG0Ge1f0Xk": { - "title": "Short Polling", - "description": "Short polling is a technique where a client periodically sends requests to a server at regular intervals to check for updates or new data. The server responds with the current state or any changes since the last request. While simple to implement and compatible with most HTTP infrastructures, short polling can be inefficient due to the frequent network requests and potential for increased latency in delivering updates. It contrasts with long polling and WebSockets, which offer more efficient mechanisms for real-time communication. Short polling is often used when real-time requirements are less stringent and ease of implementation is a priority.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Amazon SQS Short and Long Polling", - "url": "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-short-and-long-polling.html", - "type": "article" - }, - { - "title": "Short Polling vs Long Polling vs WebSockets", - "url": "https://www.youtube.com/watch?v=ZBM28ZPlin8", - "type": "video" - } - ] + "long--short-polling@osvajAJlwGI3XnX0fE-kA.md": { + "title": "Long / Short Polling", + "description": "", + "links": [] }, "M0iaSSdVPWaCUpyTG50Vf": { "title": "Redis", @@ -3239,63 +2807,15 @@ } ] }, - "WiAK70I0z-_bzbWNwiHUd": { - "title": "TimeScale", - "description": "TimescaleDB is an open-source, time-series database built as an extension to PostgreSQL. It is designed to handle large volumes of time-stamped data efficiently, making it suitable for applications that require high-performance analytics on time-series data, such as monitoring systems, IoT applications, and financial services. TimescaleDB leverages PostgreSQL’s features while providing additional capabilities for time-series data, including efficient data ingestion, advanced time-based queries, and automatic data partitioning (hypertables). 
It supports complex queries and aggregations, making it a powerful tool for analyzing trends and patterns in time-series data.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Timescale Website", - "url": "https://www.timescale.com/", - "type": "article" - }, - { - "title": "Tutorial - TimeScaleDB Explained in 100 Seconds", - "url": "https://www.youtube.com/watch?v=69Tzh_0lHJ8", - "type": "video" - }, - { - "title": "What is Time Series Data?", - "url": "https://www.youtube.com/watch?v=Se5ipte9DMY", - "type": "video" - } - ] + "timescaledb@WiAK70I0z-_bzbWNwiHUd.md": { + "title": "TimescaleDB", + "description": "", + "links": [] }, - "gT6-z2vhdIQDzmR2K1g1U": { + "cassandra@zsiZLWJ2bMvrjuHch5fX_.md": { "title": "Cassandra", - "description": "Apache Cassandra is a highly scalable, distributed NoSQL database designed to handle large amounts of structured data across multiple commodity servers. It provides high availability with no single point of failure, offering linear scalability and proven fault-tolerance on commodity hardware or cloud infrastructure. Cassandra uses a masterless ring architecture, where all nodes are equal, allowing for easy data distribution and replication. It supports flexible data models and can handle both unstructured and structured data. Cassandra excels in write-heavy environments and is particularly suitable for applications requiring high throughput and low latency. Its data model is based on wide column stores, offering a more complex structure than key-value stores. Widely used in big data applications, Cassandra is known for its ability to handle massive datasets while maintaining performance and reliability.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Apache Cassandra", - "url": "https://cassandra.apache.org/_/index.html", - "type": "article" - }, - { - "title": "article@Cassandra - Quick Guide", - "url": "https://www.tutorialspoint.com/cassandra/cassandra_quick_guide.htm", - "type": "article" - }, - { - "title": "Explore top posts about Backend Development", - "url": "https://app.daily.dev/tags/backend?ref=roadmapsh", - "type": "article" - }, - { - "title": "Apache Cassandra - Course for Beginners", - "url": "https://www.youtube.com/watch?v=J-cSy5MeMOA", - "type": "video" - } - ] - }, - "QZwTLOvjUTaSb_9deuxsR": { - "title": "Base", - "description": "Oracle Base Database Service enables you to maintain absolute control over your data while using the combined capabilities of Oracle Database and Oracle Cloud Infrastructure. Oracle Base Database Service offers database systems (DB systems) on virtual machines. They are available as single-node DB systems and multi-node RAC DB systems on Oracle Cloud Infrastructure (OCI). You can manage these DB systems by using the OCI Console, the OCI API, the OCI CLI, the Database CLI (DBCLI), Enterprise Manager, or SQL Developer.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Base Database Documentation", - "url": "https://docs.oracle.com/en-us/iaas/base-database/index.html", - "type": "article" - } - ] + "description": "", + "links": [] }, "5xy66yQrz1P1w7n6PcAFq": { "title": "AWS Neptune", @@ -3354,20 +2874,106 @@ } ] }, - "ZsZvStCvKwFhlBYe9HGhl": { - "title": "Migrations", - "description": "Database migrations are a version-controlled way to manage and apply incremental changes to a database schema over time, allowing developers to modify the database structure (e.g., adding tables, altering columns) without affecting existing data. 
They ensure that the database evolves alongside application code in a consistent, repeatable manner across environments (e.g., development, testing, production), while maintaining compatibility with older versions of the schema. Migrations are typically written in SQL or a database-agnostic language, and are executed using migration tools like Liquibase, Flyway, or built-in ORM features such as Django or Rails migrations.\n\nLearn more from the following resources:", + "frontend-basics@oyg5g4-cY5EBEUgVkjnL3.md": { + "title": "Frontend Basics", + "description": "", + "links": [] + }, + "html@9-pCsW650T1mfj5dmRB9L.md": { + "title": "HTML", + "description": "", + "links": [] + }, + "css@utA1W2O6pzoV_LbtDE5DN.md": { + "title": "CSS", + "description": "", + "links": [] + }, + "javascript@An2lMuJEkkpL0cfw4RrSl.md": { + "title": "JavaScript", + "description": "", + "links": [] + }, + "K55h3aqOGe6-hgVhiFisT": { + "title": "Microservices", + "description": "Microservices is an architectural style that structures an application as a collection of loosely coupled, independently deployable services. Each microservice focuses on a specific business capability and communicates with others via lightweight protocols, typically HTTP or messaging queues. This approach allows for greater scalability, flexibility, and resilience, as services can be developed, deployed, and scaled independently. Microservices also facilitate the use of diverse technologies and languages for different components, and they support continuous delivery and deployment. However, managing microservices involves complexity in terms of inter-service communication, data consistency, and deployment orchestration.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Pattern: Microservice Architecture", + "url": "https://microservices.io/patterns/microservices.html", + "type": "article" + }, + { + "title": "What is Microservices?", + "url": "https://smartbear.com/solutions/microservices/", + "type": "article" + }, + { + "title": "Microservices 101", + "url": "https://thenewstack.io/microservices-101/", + "type": "article" + }, + { + "title": "Articles about Microservices", + "url": "https://thenewstack.io/category/microservices/", + "type": "article" + }, + { + "title": "Explore top posts about Microservices", + "url": "https://app.daily.dev/tags/microservices?ref=roadmapsh", + "type": "article" + }, + { + "title": "Microservices explained in 5 minutes", + "url": "https://www.youtube.com/watch?v=lL_j7ilk7rc", + "type": "video" + } + ] + }, + "nkmIv3dNwre4yrULMgTh3": { + "title": "Serverless", + "description": "Serverless computing is a cloud computing model where developers build and run applications without managing server infrastructure. In this model, cloud providers handle the server management, scaling, and maintenance tasks. Developers deploy code in the form of functions, which are executed in response to events or triggers, and are billed based on the actual usage rather than reserved capacity. This approach simplifies development by abstracting infrastructure concerns, enabling automatic scaling, and reducing operational overhead. 
Common serverless platforms include AWS Lambda, Google Cloud Functions, and Azure Functions, which support a range of event-driven applications and microservices.\n\nVisit the following resources to learn more:", "links": [ { - "title": "What are Database Migrations?", - "url": "https://www.prisma.io/dataguide/types/relational/what-are-database-migrations", + "title": "Serverless", + "url": "https://www.ibm.com/cloud/learn/serverless", + "type": "article" + }, + { + "title": "AWS Services", + "url": "https://aws.amazon.com/serverless/", + "type": "article" + }, + { + "title": "Explore top posts about Serverless", + "url": "https://app.daily.dev/tags/serverless?ref=roadmapsh", "type": "article" }, { - "title": "Database Migrations for Beginners", - "url": "https://www.youtube.com/watch?v=dJDBP7pPA-o", + "title": "Serverless Computing in 100 Seconds", + "url": "https://www.youtube.com/watch?v=W_VV2Fx32_Y&ab_channel=Fireship", "type": "video" } ] + }, + "clickhouse@ZyGLSvx17p7QmYDy1LFbM.md": { + "title": "ClickHouse", + "description": "", + "links": [] + }, + "scylladb@aArZ3gKwObzafCkTOd-Hj.md": { + "title": "ScyllaDB", + "description": "", + "links": [] + }, + "dgraph@GKrcTsUi5XWj_pP1TOK0S.md": { + "title": "DGraph", + "description": "", + "links": [] + }, + "migrations@MOLAXgs0CMCT7o84L0EaK.md": { + "title": "Migrations", + "description": "", + "links": [] } } \ No newline at end of file diff --git a/public/roadmap-content/blockchain.json b/public/roadmap-content/blockchain.json index d4863f1fb3c8..873e08c39f74 100644 --- a/public/roadmap-content/blockchain.json +++ b/public/roadmap-content/blockchain.json @@ -622,22 +622,6 @@ } ] }, - "PkRAYBZQAUAHxWEeCCX4U": { - "title": "Huobi Eco Chain", - "description": "Huobi's ECO Chain (also known as HECO) is a public blockchain that provides developers with a low-cost onchain environment for running decentralized apps (dApps) of smart contracts and storing digital assets.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Introduction to HECO Chain", - "url": "https://docs.hecochain.com/#/", - "type": "article" - }, - { - "title": "Huobi Eco Chain whitepaper", - "url": "https://www.hecochain.com/developer.133bd45.pdf", - "type": "article" - } - ] - }, "txQ9U1wcnZkQVh6B49krk": { "title": "Avalanche", "description": "Avalanche describes itself as an “open, programmable smart contracts platform for decentralized applications.” What does that mean? 
Like many other decentralized protocols, Avalanche has its own token called AVAX, which is used to pay transaction fees and can be staked to secure the network.\n\nVisit the following resources to learn more:", diff --git a/public/roadmap-content/computer-science.json b/public/roadmap-content/computer-science.json index 3425ab9271af..52094bcde800 100644 --- a/public/roadmap-content/computer-science.json +++ b/public/roadmap-content/computer-science.json @@ -4284,5 +4284,15 @@ "type": "video" } ] + }, + "small-o@2cg5PogENPhiYFXQnV9xC.md": { + "title": "Small O", + "description": "", + "links": [] + }, + "small-omega@dUBRG_5aUYlICsjPbRlTf.md": { + "title": "Small Omega", + "description": "", + "links": [] } } \ No newline at end of file diff --git a/public/roadmap-content/data-analyst.json b/public/roadmap-content/data-analyst.json index 38cf20ee6d47..dd78d1462952 100644 --- a/public/roadmap-content/data-analyst.json +++ b/public/roadmap-content/data-analyst.json @@ -472,75 +472,11 @@ } ] }, - "8OXmF2Gn6TYJotBRvDjqA": { - "title": "Pandas", - "description": "Pandas is a widely acknowledged and highly useful data manipulation library in the world of data analysis. Known for its robust features like data cleaning, wrangling and analysis, pandas has become one of the go-to tools for data analysts. Built on NumPy, it provides high-performance, easy-to-use data structures and data analysis tools. In essence, its flexibility and versatility make it a critical part of the data analyst's toolkit, as it holds the capability to cater to virtually every data manipulation task.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Pandas", - "url": "https://pandas.pydata.org/", - "type": "article" - }, - { - "title": "NumPy vs Pandas", - "url": "https://www.youtube.com/watch?v=KHoEbRH46Zk", - "type": "video" - } - ] - }, "l1SnPc4EMqGdaIAhIQfrT": { "title": "Data Visualisation Libraries", "description": "Data visualization libraries are crucial in data science for transforming complex datasets into clear and interpretable visual representations, facilitating better understanding and communication of data insights. In Python, several libraries are widely used for this purpose. Matplotlib is a foundational library that offers comprehensive tools for creating static, animated, and interactive plots. Seaborn, built on top of Matplotlib, provides a high-level interface for drawing attractive and informative statistical graphics with minimal code. Plotly is another powerful library that allows for the creation of interactive and dynamic visualizations, which can be easily embedded in web applications. Additionally, libraries like Bokeh and Altair offer capabilities for creating interactive plots and dashboards, enhancing exploratory data analysis and the presentation of data findings. Together, these libraries enable data scientists to effectively visualize trends, patterns, and outliers in their data, making the analysis more accessible and actionable.\n\nLearn more from the following resources:", "links": [] }, - "uGkXxdMXUMY-3fQFS1jK8": { - "title": "Matplotlib", - "description": "Matplotlib is a paramount data visualization library used extensively by data analysts for generating a wide array of plots and graphs. Through Matplotlib, data analysts can convey results clearly and effectively, driving insights from complex data sets. It offers a hierarchical environment which is very natural for a data scientist to work with. 
Providing an object-oriented API, it allows for extensive customization and integration into larger applications. From histograms, bar charts, scatter plots to 3D graphs, the versatility of Matplotlib assists data analysts in the better comprehension and compelling representation of data.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Matplotlib", - "url": "https://matplotlib.org/", - "type": "article" - }, - { - "title": "Learn Matplotlib in 6 minutes", - "url": "https://www.youtube.com/watch?v=nzKy9GY12yo", - "type": "video" - } - ] - }, - "y__UHXe2DD-IB7bvMF1-X": { - "title": "Dplyr", - "description": "Dplyr is a powerful and popular toolkit for data manipulation in R. As a data analyst, this library provides integral functions to manipulate, clean, and process data efficiently. It has been designed to be easy and intuitive, ensuring a robust and consistent syntax. Dplyr ensures data reliability and fast processing, essential for analysts dealing with large datasets. With a strong focus on efficiency, dplyr functions like select, filter, arrange, mutate, summarise, and group\\_by optimise data analysis operations, making data manipulation a smoother and hassle-free procedure for data analysts.\n\nLearn more from the following resources:", - "links": [ - { - "title": "dplyr", - "url": "https://dplyr.tidyverse.org/", - "type": "article" - }, - { - "title": "Dplyr Essentials", - "url": "https://www.youtube.com/watch?v=Gvhkp-Yw65U", - "type": "video" - } - ] - }, - "E0hIgQEeZlEidr4HtUFrL": { - "title": "Ggplot2", - "description": "When it comes to data visualization in R programming, ggplot2 stands tall as one of the primary tools for data analysts. This data visualization library, which forms part of the tidyverse suite of packages, facilitates the creation of complex and sophisticated visual narratives. With its grammar of graphics philosophy, ggplot2 enables analysts to build graphs and charts layer by layer, thereby offering detailed control over graphical features and design. Its versatility in creating tailored and aesthetically pleasing graphics is a vital asset for any data analyst tackling exploratory data analysis, reporting, or dashboard building.\n\nLearn more from the following resources:", - "links": [ - { - "title": "ggplot2", - "url": "https://ggplot2.tidyverse.org/", - "type": "article" - }, - { - "title": "Make beautiful graphs in R", - "url": "https://www.youtube.com/watch?v=qnw1xDnt_Ec", - "type": "video" - } - ] - }, "_sjXCLHHTbZromJYn6fnu": { "title": "Data Collection", "description": "Data collection is a foundational process that entails gathering relevant data from various sources. This data can be quantitative or qualitative and may be sourced from databases, online platforms, customer feedback, among others. The gathered information is then cleaned, processed, and interpreted to extract meaningful insights. A data analyst performs this whole process carefully, as the quality of data is paramount to ensuring accurate analysis, which in turn informs business decisions and strategies. This highlights the importance of an excellent understanding, proper tools, and precise techniques when it comes to data collection in data analysis.\n\nLearn more from the following resources:", @@ -814,22 +750,6 @@ } ] }, - "yn1sstYMO9du3rpfQqNs9": { - "title": "Average", - "description": "When focusing on data analysis, understanding key statistical concepts is crucial. Amongst these, central tendency is a foundational element. 
Central Tendency refers to the measure that determines the center of a distribution. The average is a commonly used statistical tool by which data analysts discern trends and patterns. As one of the most recognized forms of central tendency, figuring out the \"average\" involves summing all values in a data set and dividing by the number of values. This provides analysts with a 'typical' value, around which the remaining data tends to cluster, facilitating better decision-making based on existing data.\n\nLearn more from the following resources:", - "links": [ - { - "title": "How to Calculate the Average", - "url": "https://support.microsoft.com/en-gb/office/calculate-the-average-of-a-group-of-numbers-e158ef61-421c-4839-8290-34d7b1e68283#:~:text=Average%20This%20is%20the%20arithmetic,by%206%2C%20which%20is%205.", - "type": "article" - }, - { - "title": "Average Formula", - "url": "https://www.cuemath.com/average-formula/", - "type": "article" - } - ] - }, "tSxtyJhL5wjU0XJcjsJmm": { "title": "Range", "description": "The concept of Range refers to the spread of a dataset, primarily in the realm of statistics and data analysis. This measure is crucial for a data analyst as it provides an understanding of the variability amongst the numbers within a dataset. Specifically in a role such as Data Analyst, understanding the range and dispersion aids in making more precise analyses and predictions. Understanding the dispersion within a range can highlight anomalies, identify standard norms, and form the foundation for statistical conclusions like the standard deviation, variance, and interquartile range. It allows for the comprehension of the reliability and stability of particular datasets, which can help guide strategic decisions in many industries. Therefore, range is a key concept that every data analyst must master.\n\nLearn more from the following resources:", diff --git a/public/roadmap-content/devops.json b/public/roadmap-content/devops.json index 7e3f302a97e7..a7b046a6b323 100644 --- a/public/roadmap-content/devops.json +++ b/public/roadmap-content/devops.json @@ -2193,32 +2193,6 @@ } ] }, - "-pGF3soruWWxwE4LxE5Vk": { - "title": "Travis CI", - "description": "Travis CI is a cloud-based continuous integration (CI) service that automatically builds and tests code changes in GitHub repositories. It helps streamline the software development process by automatically running tests and building applications whenever code is pushed or a pull request is made. Travis CI supports a variety of programming languages and provides integration with other tools and services, offering features like build matrix configurations, deployment pipelines, and notifications. 
Its ease of setup and integration with GitHub makes it a popular choice for open-source and private projects looking to implement CI/CD practices.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Travis CI Documentation", - "url": "https://docs.travis-ci.com/", - "type": "article" - }, - { - "title": "Travis CI Tutorial", - "url": "https://docs.travis-ci.com/user/tutorial/", - "type": "article" - }, - { - "title": "Explore top posts about CI/CD", - "url": "https://app.daily.dev/tags/cicd?ref=roadmapsh", - "type": "article" - }, - { - "title": "Travis CI Complete Tutorial for DevOps Engineers", - "url": "https://www.youtube.com/watch?v=xLWDOLhTH38", - "type": "video" - } - ] - }, "1-JneOQeGhox-CKrdiquq": { "title": "Circle CI", "description": "CircleCI is a popular continuous integration and continuous delivery (CI/CD) platform that automates the build, test, and deployment processes of software projects. It supports a wide range of programming languages and integrates with various version control systems, primarily GitHub and Bitbucket. CircleCI uses a YAML configuration file to define pipelines, allowing developers to specify complex workflows, parallel job execution, and custom environments. It offers features like caching, artifact storage, and Docker layer caching to speed up builds. With its cloud-based and self-hosted options, CircleCI provides scalable solutions for projects of all sizes, helping teams improve code quality, accelerate release cycles, and streamline their development workflows.\n\nVisit the following resources to learn more:", @@ -2240,26 +2214,10 @@ } ] }, - "TsXFx1wWikVBVoFUUDAMx": { - "title": "Drone", - "description": "Drone is an open-source continuous integration (CI) platform built on container technology. It automates building, testing, and deploying code using a simple, YAML-based pipeline configuration stored alongside the source code. Drone executes each step of the CI/CD process in isolated Docker containers, ensuring consistency and reproducibility. It supports multiple version control systems, offers parallel execution of pipeline steps, and provides plugins for integrating with various tools and services. Drone's lightweight, scalable architecture makes it suitable for projects of all sizes, from small teams to large enterprises. Its focus on simplicity and containerization aligns well with modern DevOps practices and microservices architectures.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Drone", - "url": "https://www.drone.io/", - "type": "article" - }, - { - "title": "Drone Documentation", - "url": "https://docs.drone.io/", - "type": "article" - }, - { - "title": "Drone CI Quickstart", - "url": "https://www.youtube.com/watch?v=Qf8EHRzAgHQ", - "type": "video" - } - ] + "octopus-deploy@TsXFx1wWikVBVoFUUDAMx.md": { + "title": "Octopus Deploy", + "description": "", + "links": [] }, "L000AbzF3oLcn4B1eUIYX": { "title": "TeamCity", @@ -3221,31 +3179,10 @@ } ] }, - "wNguM6-YEznduz3MgBCYo": { - "title": "Application Monitoring", - "description": "Application monitoring involves the continuous observation and analysis of software applications to ensure they perform optimally, identify issues, and provide insights into their operation. This process includes tracking metrics such as response times, error rates, resource utilization (CPU, memory, and disk), and transaction performance. 
Application monitoring tools collect and analyze data to detect anomalies, provide alerts for potential problems, and offer detailed insights into application behavior and performance. By monitoring applications, organizations can proactively address issues, optimize performance, and improve user experience, ultimately ensuring reliability and efficiency in their software systems.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Applying Basic vs. Advanced Monitoring Techniques", - "url": "https://thenewstack.io/applying-basic-vs-advanced-monitoring-techniques/", - "type": "article" - }, - { - "title": "Why Legacy Apps Need Your Monitoring Love, Too", - "url": "https://thenewstack.io/why-legacy-apps-need-your-monitoring-love-too/", - "type": "article" - }, - { - "title": "Explore top posts about Monitoring", - "url": "https://app.daily.dev/tags/monitoring?ref=roadmapsh", - "type": "article" - }, - { - "title": "Application Monitoring - 4 Golden Signals", - "url": "https://www.youtube.com/watch?v=PHcnmTdVPT0", - "type": "video" - } - ] + "observability@wNguM6-YEznduz3MgBCYo.md": { + "title": "Observability", + "description": "", + "links": [] }, "8rd7T5ahK2I_zh5co-IF-": { "title": "Jaeger", @@ -3361,5 +3298,25 @@ "type": "video" } ] + }, + "eso@lUUJAEBrGJvL8dRs2n1GD.md": { + "title": "ESO", + "description": "", + "links": [] + }, + "dynatrace@4aJVaimsuvGIPXMZ_WjaA.md": { + "title": "Dynatrace", + "description": "", + "links": [] + }, + "salt@Kumwd6XOlEMeDohDH0q9P.md": { + "title": "Salt", + "description": "", + "links": [] + }, + "openshift@3GryoQuI67JTHg9r3xUHO.md": { + "title": "OpenShift", + "description": "", + "links": [] } } \ No newline at end of file diff --git a/public/roadmap-content/docker.json b/public/roadmap-content/docker.json index b570db9cd4e3..827cabc79355 100644 --- a/public/roadmap-content/docker.json +++ b/public/roadmap-content/docker.json @@ -378,22 +378,6 @@ } ] }, - "HlTxLqKNFMhghtKF6AcWu": { - "title": "Interactive Test Environments", - "description": "Docker allows you to create isolated, disposable environments that can be deleted once you're done with testing. This makes it much easier to work with third party software, test different dependencies or versions, and quickly experiment without the risk of damaging your local setup.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Launch a Dev Environment", - "url": "https://docs.docker.com/desktop/dev-environments/create-dev-env/", - "type": "article" - }, - { - "title": "Test Environments - Medium", - "url": "https://manishsaini74.medium.com/containerized-testing-orchestrating-test-environments-with-docker-5201bfadfdf2", - "type": "article" - } - ] - }, "YzpB7rgSR4ueQRLa0bRWa": { "title": "Command Line Utilities", "description": "Docker images can include command line utilities or standalone applications that we can run inside containers.\n\nVisit the following resources to learn more:", diff --git a/public/roadmap-content/engineering-manager.json b/public/roadmap-content/engineering-manager.json index 26fcea2140c2..981b9000d0ec 100644 --- a/public/roadmap-content/engineering-manager.json +++ b/public/roadmap-content/engineering-manager.json @@ -468,29 +468,29 @@ "description": "An Engineering Manager's role in production issues management is crucial. They are responsible for quick decision making during system down-times or service disruptions. 
They deploy resources efficiently to resolve issues, sometimes guiding the team in real-time to troubleshoot and fix the problem.\n\nKey challenges include downtime minimization, maintaining system availability, and making trade-offs between quick fixes and long-term solutions. They address these challenges by implementing strong incident management policies and training the team for effective system recovery processes.\n\nSuccess in this aspect requires a mix of technical skills, effective communication, and problem-solving abilities. They also need a solid understanding of the deployed systems and infrastructure to ensure seamless functionality and service availability. It's crucial to learn from each outage to prevent or handle similar occurrences in the future.", "links": [] }, - "mIUx8zAHWyPWPGvxuTK4y": { - "title": "Contingency planning", - "description": "An Engineering Manager needs to ensure that their team is prepared for any unexpected situations or challenges - that's where contingency planning comes into play. It's the manager's responsibility to guide their team in developing robust plans that address potential risks and uncertainties. This includes identifying possible obstacles, evaluating their impact, and devising strategies to mitigate them.\n\nThe challenges this role faces are manifold, from predicting the unknown to dealing with a resistant team. To navigate these, cultivating an open and flexible team culture is crucial. By fostering a problem-solving mentality, the manager can encourage their team to see contingency planning as a tool, not a burden.\n\nTo successfully play this role, an Engineering Manager needs to have strong risk management and strategic thinking skills. They must be able to balance a long-term view with immediate, tactical decisions. They should also be comfortable leading difficult conversations about potential failures and mishaps.", + "burnout-prevention@mIUx8zAHWyPWPGvxuTK4y.md": { + "title": "Burnout prevention", + "description": "", "links": [] }, - "nnoVA8W70hrNDxN3XQCVL": { - "title": "Disaster recovery", - "description": "An Engineering Manager plays a critical part in disaster recovery. It is their job to ensure that, if any failure occurs, the team can quickly get systems up and running again. They devise and oversee the implementation of a sturdy disaster recovery plan. This often involves risk assessment, data backups, and establishing rapid recovery processes.\n\nChallenges they may face include dealing with data loss and service disruptions. To face these, an Engineering Manager often relies on a good strategy, clear communication, and effective coordination. They align the team and ensure everyone knows their role in the recovery process.\n\nIt requires strong leadership, risk management, technical knowledge, and problem-solving skills. Regular testing of the recovery plan is also essential to identify loopholes and ensure the effectiveness of the strategies in place.", + "critical-situation-leadership@nnoVA8W70hrNDxN3XQCVL.md": { + "title": "Critical situation leadership", + "description": "", "links": [] }, - "FwK-B7jRbBXVnuY9JxI1w": { - "title": "Business continuity", - "description": "An Engineering Manager plays a pivotal role in the domain of business continuity. This involves ensuring that the various aspects of technological and process frameworks are resilient to disruptions. 
The aim is to sustain core business operations during times of crisis.\n\nKey responsibilities include setting up robust risk management systems, executing incident-response plans, and ensuring data integrity during downtime. It's a challenge to maintain operational resilience without stinting ongoing projects, and managing it involves a delicate balance of resources.\n\nTo achieve this, Engineering Managers must possess excellent problem-solving skills and a clear understanding of business operation needs. Regular risk assessment and sharpening the team's skill set to adapt and respond to uncertainty quickly are essential strategies. Robust infrastructure, policy planning, and good leadership are underlying requirements to render effective business continuity.", + "emergency-staffing@FwK-B7jRbBXVnuY9JxI1w.md": { + "title": "Emergency staffing", + "description": "", "links": [] }, - "QFhhOgwz_bgZgOfKFg5XA": { - "title": "Security incident handling", - "description": "For an Engineering Manager, handling security incidents within a team involves keen attention to detail and quick actions. Their key responsibilities include coordinating with the security team to manage the issue and ensure minimal disruption to the project. They also facilitate communications, keeping all stakeholders informed about the situation and the steps being taken.\n\nChallenges faced by the Engineering Manager include managing team stress levels during security incidents and ensuring swift return to normal operations post-incident. By skillfully juggling these tasks, the manager can help secure the team's trust and keep the project on track.\n\nTo successfully handle security incidents, an Engineering Manager needs active decision-making skills, a solid understanding of security protocols, and strong team leadership capabilities. The ability to react calmly and decisively under pressure is also essential.", + "stress-management@QFhhOgwz_bgZgOfKFg5XA.md": { + "title": "Stress management", + "description": "", "links": [] }, - "tmY4Ktu6luFg5wKylJW76": { - "title": "Production issues management", - "description": "As an Engineering Manager, handling production issues is one of the vital responsibilities. This includes timeliness in identifying, troubleshooting, and resolving problems. They may be involved in the actual debugging, but most of their tasks involve coordinating the team and defining procedures for a swift response to any issues.\n\nAddressing these issues can be challenging, particularly if they disrupt essential services or products. The manager needs to communicate effectively with the team and stakeholders, manage expectations, and ensure minimal interruption of services.\n\nTo excel in production issues management, an Engineering Manager needs valuable skills. These include technical knowledge, critical thinking, decision-making, and strong communication skills. Also, experience with certain tools, like monitoring software, could be beneficial to quickly detect and resolve issues.", + "work-life-balance-during-crises@tmY4Ktu6luFg5wKylJW76.md": { + "title": "Work-life balance during crises", + "description": "", "links": [] }, "5MM1ccB1pmQcd3Uyjmbr7": { @@ -568,11 +568,6 @@ "description": "The role of an Engineering Manager extends to external collaboration as well. Here, they often serve the role of liaising with external teams, vendors, or partners, aligning goals and ensuring smooth communication flow. 
The key responsibilities include managing relationships, understanding the partner ecosystem, and negotiating win-win situations.\n\nEngineering Managers face challenges like cultural differences, communication hurdles, or time zone disparities. They address these by building reliability through regular updates, clear agendas, and understanding each other's work culture.\n\nTo succeed, Engineering Managers need good interpersonal skills, a keen eye for future opportunities, and the ability to adapt quickly. An understanding of business and sales, alongside engineering knowledge, can be advantageous too. This role needs balance - drive details when necessary and step back and delegate when appropriate.", "links": [] }, - "TQY4hjo56rDdlbzjs_-nl": { - "title": "Competitive Analysis", - "description": "An Engineering Manager uses competitive analysis to understand market trends and competitor strategies. This aids in decision-making and strategic planning. Their key responsibilities include identifying key competitors, analyzing their products, sales, and marketing strategies.\n\nChallenges may arise from having incomplete or inaccurate data. In these cases, Engineering Managers have to rely on their judgement and experience. Their analysis should be unbiased and as accurate as possible to influence the right design and development strategies.\n\nSuccessful competitive analysis requires strong analytical skills, keen attention to detail, and the ability to understand complex market dynamics. Managers must stay updated on market trend, technological advancements and be able to distinguish their company's unique selling proposition. This will allow them to plan steps to maintain competitiveness in the market.", - "links": [] - }, "QUxpEK8smXRBs2gMdDInB": { "title": "Legacy System Retirement", "description": "Every Engineering Manager knows the value and hurdles of legacy system retirement. They must plan and manage this complex task with a keen understanding of the system's purpose, its interdependencies, and potential risks of its retirement. Key responsibilities include assessing the impact on users, mitigating downtime, and ensuring business continuity.\n\nChallenges often arise from lack of documentation or knowledge about the legacy system. To overcome this, they could organize knowledge-sharing sessions with long-standing team members, assessing external help, or gradual transition methods.\n\nThe successful retirement of a legacy system requires a comprehensive approach, good interpersonal skills for team collaboration, and strong decision-making skills. An Engineering Manager has to balance the system’s business value against the cost and risk of maintaining it.", diff --git a/public/roadmap-content/frontend.json b/public/roadmap-content/frontend.json index 36b7ee497702..9b5acd1e7506 100644 --- a/public/roadmap-content/frontend.json +++ b/public/roadmap-content/frontend.json @@ -197,121 +197,6 @@ } ] }, - "PCirR2QiFYO89Fm-Ev3o1": { - "title": "Learn the basics", - "description": "HTML (HyperText Markup Language) is the backbone of webpages. It structures the content you see online. You use CSS to style this HTML structure and JavaScript to make it interactive. 
Think of HTML as the skeleton of a website.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "W3Schools: Learn HTML", - "url": "https://www.w3schools.com/html/html_intro.asp", - "type": "article" - }, - { - "title": "web.dev: Learn HTML", - "url": "https://web.dev/learn/html", - "type": "article" - }, - { - "title": "HTML Full Course - Build a Website Tutorial", - "url": "https://www.youtube.com/watch?v=kUMe1FH4CHE", - "type": "video" - } - ] - }, - "z8-556o-PaHXjlytrawaF": { - "title": "Writing Semantic HTML", - "description": "Semantic HTML uses markup to convey the meaning of web content, not just its appearance, by employing elements like `
<header>`, `<footer>