diff --git a/blog/Spark-Architecture/index.md b/blog/Spark-Architecture/index.md deleted file mode 100644 index 92338b86..00000000 --- a/blog/Spark-Architecture/index.md +++ /dev/null @@ -1,309 +0,0 @@ ---- -title: "Spark Architecture Explained" -authors: [Aditya-Singh-Rathore] -tags: [Apache Spark, Spark Architecture, Big Data, Distributed Computing, Data Engineering] -date: 2025-08-18 - -description: Apache Spark is a fast, open-source big data framework that leverages in-memory computing for high performance. Its architecture powers scalable distributed processing across clusters, making it essential for analytics and machine learning. - -draft: false -canonical_url: -# meta: -# - name: "robots" -# content: "index, follow" -# - property: "og:title" -# content: "What is Google DeepMind AI?" -# - property: "og:description" -# content: "DeepMind is an auxiliary of Google that centers around man-made brainpower. All the more explicitly, it utilizes a part of AI called AI" -# - property: "og:type" -# content: "article" -# - property: "og:url" -# content: "/blog/getting-started-with-mern" -# - property: "og:image" -# content: "/assets/images/mern-8a27add30515e58f789f89a4c9072818.jpg" -# - name: "twitter:card" -# content: "summary_large_image" -# - name: "twitter:title" -# content: "A Comprehensive Guide to Get You Started with MERN Stack" -# - name: "twitter:description" -# content: "DeepMind is an auxiliary of Google that centers around man-made brainpower. All the more explicitly, it utilizes a part of AI called AI" -# - name: "twitter:image" -# content: "assets/images/mern-8a27add30515e58f789f89a4c9072818.jpg" - ---- - -# Understanding Apache Spark Architecture: A Deep Dive into Distributed Computing - -Hey there, fellow data enthusiasts! 👋 - -I remember the first time I encountered a Spark architecture diagram. It looked like a complex web of boxes and arrows that seemed to communicate in some secret distributed computing language. 
But once I understood what each component actually does and how they work together, everything clicked into place. - -Today, I want to walk you through Spark's architecture in a way that I wish someone had explained it to me back then - focusing on the core components and how this beautiful system actually works under the hood. - -## What is Apache Spark? - -Before diving into the architecture, let's establish what we're dealing with. Apache Spark is an open-source, distributed computing framework designed to process massive datasets across clusters of computers. Think of it as a coordinator that can take your data processing job and intelligently distribute it across multiple machines to get the work done faster. - -The key insight that makes Spark special? It keeps data in memory between operations whenever possible, which is why it can be dramatically faster than traditional batch processing systems. - -## The Big Picture: High-Level Architecture - -![Spark Architecture](/img/blogs/07-spark_architecture.png) - - -When you look at Spark's architecture, you're essentially looking at a well-orchestrated system with three main types of components working together: - -1. **Driver Program** - The mastermind that coordinates everything -2. **Cluster Manager** - The resource allocator -3. **Executors** - The workers that do the actual processing - -Let's break down each of these and understand how they collaborate. - -## Core Components Deep Dive - -### 1. The Driver Program: Your Application's Brain - -The Driver Program is where your Spark application begins and ends. When you write a Spark program and run it, you're essentially creating a driver program. 
Here's what makes it the brain of the operation: - -**What the Driver Does:** -- Contains your main() function and defines RDDs and operations on them -- Converts your high-level operations into a DAG (Directed Acyclic Graph) of tasks -- Schedules tasks across the cluster -- Coordinates with the cluster manager to get resources -- Collects results from executors and returns final results - -**Think of it this way:** If your Spark application were a restaurant, the Driver would be the head chef who takes orders (your code), breaks them down into specific cooking tasks, assigns those tasks to kitchen staff (executors), and ensures everything comes together for the final dish. - -The driver runs in its own JVM process and maintains all the metadata about your Spark application throughout its lifetime. - -### 2. Cluster Manager: The Resource Referee - -The Cluster Manager sits between your driver and the actual compute resources. Its job is to allocate and manage resources across the cluster. Spark is flexible and works with several cluster managers: - -**Standalone Cluster Manager:** -- Spark's built-in cluster manager -- Simple to set up and understand -- Great for dedicated Spark clusters - -**Apache YARN (Yet Another Resource Negotiator):** -- Hadoop's resource manager -- Perfect if you're in a Hadoop ecosystem -- Allows resource sharing between Spark and other Hadoop applications - -**Apache Mesos:** -- A general-purpose cluster manager -- Can handle multiple frameworks beyond just Spark -- Good for mixed workload environments - -**Kubernetes:** -- The modern container orchestration platform -- Increasingly popular for new deployments -- Excellent for cloud-native environments - -**The key point:** The cluster manager's job is resource allocation - it doesn't care what your application does, just how much CPU and memory it needs. - -### 3. Executors: The Workhorses - -Executors are the processes that actually run your tasks and store data for your application. 
Each executor runs in its own JVM process and can run multiple tasks concurrently using threads. - -**What Executors Do:** -- Execute tasks sent from the driver -- Store computation results in memory or disk storage -- Provide in-memory storage for cached RDDs/DataFrames -- Report heartbeat and task status back to the driver - -**Key Characteristics:** -- Each executor has a fixed number of cores and amount of memory -- Executors are launched at the start of a Spark application and run for the entire lifetime -- If an executor fails, Spark can launch new ones and recompute lost data - -Think of executors as skilled workers in our restaurant analogy - they can handle multiple cooking tasks simultaneously and have their own workspace (memory) to store ingredients and intermediate results. - -## How These Components Work Together: The Execution Flow - -Now that we know the players, let's see how they orchestrate a typical Spark application: - -### Step 1: Application Submission -When you submit a Spark application, the driver program starts up and contacts the cluster manager requesting resources for executors. - -### Step 2: Resource Allocation -The cluster manager examines available resources and launches executor processes on worker nodes across the cluster. - -### Step 3: Task Planning -The driver analyzes your code and creates a logical execution plan. It breaks down operations into stages and tasks that can be executed in parallel. - -### Step 4: Task Distribution -The driver sends tasks to executors. Each task operates on a partition of data, and multiple tasks can run in parallel across different executors. - -### Step 5: Execution and Communication -Executors run the tasks, storing intermediate results and communicating progress back to the driver. The driver coordinates everything and handles any failures. - -### Step 6: Result Collection -Once all tasks complete, the driver collects results and returns the final output to your application. 
- -## Understanding RDDs: The Foundation - -At the heart of Spark's architecture lies the concept of Resilient Distributed Datasets (RDDs). Understanding RDDs is crucial to understanding how Spark actually works. - -**What makes RDDs special:** - -**Resilient:** RDDs can automatically recover from node failures. Spark remembers how each RDD was created (its lineage) and can rebuild lost partitions. - -**Distributed:** RDD data is automatically partitioned and distributed across multiple nodes in the cluster. - -**Dataset:** At the end of the day, it's still just a collection of your data - but with superpowers. - -### RDD Operations: Transformations vs Actions - -RDDs support two types of operations, and understanding the difference is crucial: - -**Transformations** (Lazy): -```scala -val filtered = data.filter(x => x > 10) -val mapped = filtered.map(x => x * 2) -val grouped = mapped.groupByKey() -``` -These operations don't actually execute immediately. Spark just builds up a computation graph. - -**Actions** (Eager): -```scala -val results = grouped.collect() // Brings data to driver -val count = filtered.count() // Returns number of elements -grouped.saveAsTextFile("hdfs://...") // Saves to storage -``` -Actions trigger the actual execution of all the transformations in the lineage. - -This lazy evaluation allows Spark to optimize the entire computation pipeline before executing anything. - -## The DAG: Spark's Optimization Engine - -One of Spark's most elegant features is how it converts your operations into a Directed Acyclic Graph (DAG) for optimal execution. - -### How DAG Optimization Works - -When you chain multiple transformations together, Spark doesn't execute them immediately. Instead, it builds a DAG that represents the computation. This allows for powerful optimizations: - -**Pipelining:** Multiple transformations that don't require data shuffling can be combined into a single stage and executed together. 
- -**Stage Boundaries:** Spark creates stage boundaries at operations that require data shuffling (like `groupByKey`, `join`, or `repartition`). - -### Stages and Tasks Breakdown - -**Stage:** A set of tasks that can all be executed without data shuffling. All tasks in a stage can run in parallel. - -**Task:** The smallest unit of work in Spark. Each task processes one partition of data. - -**Wide vs Narrow Dependencies:** -- **Narrow Dependencies:** Each partition of child RDD depends on a constant number of parent partitions (like `map`, `filter`) -- **Wide Dependencies:** Each partition of child RDD may depend on multiple parent partitions (like `groupByKey`, `join`) - -Wide dependencies create stage boundaries because they require shuffling data across the network. - -## Memory Management: Where the Magic Happens - -Spark's memory management is what gives it the speed advantage over traditional batch processing systems. Here's how it works: - -### Memory Regions - -Spark divides executor memory into several regions: - -**Storage Memory (60% by default):** -- Used for caching RDDs/DataFrames -- LRU eviction when space is needed -- Can borrow from execution memory when available - -**Execution Memory (20% by default):** -- Used for computation in shuffles, joins, sorts, aggregations -- Can borrow from storage memory when needed - -**User Memory (20% by default):** -- For user data structures and internal metadata -- Not managed by Spark - -**Reserved Memory (300MB by default):** -- System reserved memory for Spark's internal objects - -The beautiful thing about this system is that storage and execution memory can dynamically borrow from each other based on current needs. 
- -## The Unified Stack: Multiple APIs, One Engine - -What makes Spark truly powerful is that it provides multiple high-level APIs that all run on the same core engine: - -### Spark Core -The foundation that provides: -- Basic I/O functionality -- Task scheduling and memory management -- Fault tolerance -- RDD abstraction - -### Spark SQL -- SQL queries on structured data -- DataFrame and Dataset APIs -- Catalyst query optimizer -- Integration with various data sources - -### Spark Streaming -- Real-time stream processing -- Micro-batch processing model -- Integration with streaming sources like Kafka - -### MLlib -- Distributed machine learning algorithms -- Feature transformation utilities -- Model evaluation and tuning - -### GraphX -- Graph processing and analysis -- Built-in graph algorithms -- Graph-parallel computation - -The key insight: all of these APIs compile down to the same core RDD operations, so they all benefit from Spark's optimization engine and can interoperate seamlessly. - -## Putting It All Together - -Now that we've covered all the components, let's see how they work together in a real example: - -```scala -// This creates RDDs but doesn't execute anything yet -val textFile = spark.textFile("hdfs://large-file.txt") -val words = textFile.flatMap(line => line.split(" ")) -val wordCounts = words.map(word => (word, 1)) -val aggregated = wordCounts.reduceByKey(_ + _) - -// This action triggers execution of the entire pipeline -val results = aggregated.collect() -``` - -**What happens behind the scenes:** -1. Driver creates a DAG with two stages (split by the `reduceByKey` shuffle) -2. Driver requests executors from cluster manager -3. Stage 1 tasks (read, flatMap, map) execute on partitions across executors -4. Data gets shuffled for the `reduceByKey` operation -5. Stage 2 tasks perform the aggregation -6. 
Results get collected back to the driver - -## Why This Architecture Matters - -Understanding Spark's architecture isn't just academic knowledge - it's the key to working effectively with big data: - -**Fault Tolerance:** The RDD lineage graph means Spark can recompute lost data automatically without manual intervention. - -**Scalability:** The driver/executor model scales horizontally - just add more worker nodes to handle bigger datasets. - -**Efficiency:** Lazy evaluation and DAG optimization mean Spark can optimize entire computation pipelines before executing anything. - -**Flexibility:** The unified stack means you can mix SQL, streaming, and machine learning in the same application without data movement penalties. - -## Conclusion: The Beauty of Distributed Computing - -Spark's architecture represents one of the most elegant solutions to distributed computing that I've encountered. By clearly separating concerns - coordination (driver), resource management (cluster manager), and execution (executors) - Spark creates a system that's both powerful and understandable. - -The magic isn't in any single component, but in how they all work together. The driver's intelligence in creating optimal execution plans, the cluster manager's efficiency in resource allocation, and the executors' reliability in task execution combine to create something greater than the sum of its parts. - -Whether you're processing terabytes of log data, training machine learning models, or running real-time analytics, understanding this architecture will help you reason about performance, debug issues, and design better data processing solutions. - ---- - -*The next time you see a Spark architecture diagram, I hope you'll see what I see now - not a confusing web of boxes and arrows, but an elegant dance of distributed computing components working in perfect harmony. Happy Sparking! 
🚀* - - \ No newline at end of file diff --git a/blog/spark-architecture/images/spark.png b/blog/spark-architecture/images/spark.png deleted file mode 100644 index 514daaa0..00000000 Binary files a/blog/spark-architecture/images/spark.png and /dev/null differ diff --git a/blog/spark-architecture/index.md b/blog/spark-architecture/index.md deleted file mode 100644 index 454946c5..00000000 --- a/blog/spark-architecture/index.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -title: "Spark Architecture Explained" -sidebar_label: Spark Architecture Explanation -authors: [Aditya-Singh-Rathore, sanjay-kv] -tags: [spark, azure, design] -date: 2025-08-18 -hide_table_of_contents: true ---- -Hey there, fellow data enthusiasts! 👋 Add a comment on lines R37 to R39Add diff commentMarkdown input: edit mode selected.WritePreviewAdd a suggestionHeadingBoldItalicQuoteCodeLinkUnordered listNumbered listTask listMentionReferenceSaved repliesAdd FilesPaste, drop, or click to add filesCancelCommentStart a reviewReturn to code - -I remember the first time I encountered a Spark architecture diagram. It looked like a complex web of boxes and arrows that seemed to communicate in some secret distributed computing language. But once I understood what each component actually does and how they work together, everything clicked into place. - -Today, I want to walk you through Spark's architecture in a way that I wish someone had explained it to me back then - focusing on the core components and how this beautiful system actually works under the hood. -### 👤 Research the User - -When you’re designing a new product, it’s important to remember to research your user. This means gathering data about who will be using the product and their needs. Who are they? What do they need? What are their habits and preferences? What are their goals? What are their pain points? What are they looking for in a product? 
Only by understanding the user can you create a product that meets their needs and exceeds their expectations. If you design a product without understanding the user, it is likely to be unsuccessful. - -Gather data about who will be using the product and their needs. - -- ❓Who are they? -- ❓What are their goals, habits, and pain points? -- ❓What are they looking for in a product? - -To answer these questions, you need to do some research. This involves gathering data about who will be using the product and their needs. You can find this data from surveys, focus groups, interviews, and other forms of market research. Once you have this data, you can start to design a product that meets the needs of your users. - -![img1](./images/spark.png) - -### 🧩 Define the Problem - -One of the most important aspects of good design is understanding the problem that needs to be solved. Too often, people focus on the solution without taking the time to understand the problem. This can lead to misguided efforts and a lot of wasted time and energy. The best way to identify the problem is to ask a lot of questions. Try to get as much information as possible from stakeholders, users, and anyone else who might have a vested interest in the project. Once you have a good understanding of the problem, you can start looking for solutions. - - -### 💡 Ideate Solutions -Generate creative ideas to solve the problem. The first step in coming up with ideas is to understand the problem fully. What are its causes and effects? What are people currently doing to try to solve it? Once you have a good understanding of the problem, you can start brainstorming potential solutions. To generate creative ideas, it can be helpful to think about things from different angles. Try approaching the problem from different perspectives, using different methods or tools, or looking at it from a different time period. Sometimes all it takes is a fresh perspective to come up with a great solution. 
- -- ✅Think outside the box -- ✅Use different methods and perspectives -- ✅Brainstorm with your team or solo - -Approach problems from multiple angles. Innovation often comes from seeing something in a new light. - - -### 🛠️ Refine the Solution -Select the best idea and make it more specific. - -The best way to improve a solution is to select the best idea and make it more specific. This will help to focus the team on the most important aspects of the problem. This can be done in a number of ways, but the most effective is to break the idea down into smaller chunks that can be easily addressed. Once the smaller chunks have been defined, it becomes easier to see how they fit together and whether or not they are feasible. Making a solution more specific has several benefits. It can help to focus the team on what needs to be done and make sure that everyone is on the same page. - -- ✅Focus on high-impact features -- ✅Define clear goals -- ✅Ensure everyone is aligned - -Refining makes execution manageable and ensures the solution directly addresses the user problem.There are different types of prototypes that you can use, depending on what you want to test with users. These include low-fidelity prototypes, which are sketches or wireframes of the product; high-fidelity prototypes, which are more realistic versions of the product; and paper prototypes, which are sketches. - - -### 🧪 Develop Prototypes -Create a basic version of the solution to test with users. Prototyping is the process of creating a basic or preliminary version of a product or service to test with users. The goal of prototyping is to get feedback from potential users early in the design process so that you can make changes and improvements before you invest too much time and money in the final product. You can use prototypes for different types of products, such as websites, apps, and software. 
- -### Types of Prototypes: -- ✅**Low-fidelity:** Sketches, wireframes -- ✅**High-fidelity:** Interactive, realistic simulations -- ✅**Paper prototypes:** Simple, hand-drawn flows - -Prototypes help gather feedback early and avoid costly mistakes later in development. - - - -### 📣 Collect Feedback from Users - -Feedback is a crucial part of the design process. We need to collect feedback from users to make sure that the prototype solves their needs. This will help us design a better product. The feedback we collect can be qualitative or quantitative, but it is most often qualitative because it is easier to get responses; can be collected in many different ways, such as through surveys and interviews. -Test your prototype with real users to understand: - -- ✅Does it solve their needs? -- ✅Is the experience smooth and intuitive? - -Gather both **qualitative** and **quantitative** feedback through interviews, usability testing, and surveys. Use that data to iterate and improve. 🔁 - - -### 🚀 Launch the Product - -Launching a product is a tough task. Especially when it comes to public release. You have to make sure that you are ready for any feedback and criticism you might receive. -When launching your product, there are many factors that need to be taken into accounts such as feedback, the market, and the competition. There are many things that can go wrong when releasing your product publicly. However, with careful planning and taking all of these factors into account, a successful launch is possible. - -Incorporate feedback, finalize features, and release your product to the public. - -- ✅Prepare for feedback and iteration -- ✅Know your market and competitors -- ✅Stay user-focused even after launch - -A successful launch comes from ongoing improvement and close attention to your users’ evolving needs. 
Whether you’re a sole UX designer, part of a team, or working for a large organization, these are certain steps that you need to follow in order to achieve your goals effectively. The seven steps listed above are not easy. They take time, effort, and a strong aptitude for problem-solving. Yet, Executing the above steps correctly will maximize your chances of success, while failing to address key steps along the way could sink your whole project, with the right mentors, direction, and guidance, they can help aspiring UX designers reach their goals almost as fast as they set them. - -### 🧘‍♀️ Takeaway: Build with Users, Not Just for Them - -Whether you're a solo UX designer or working in a large team, these 7 steps are crucial. They require time, effort, and problem-solving skills—but skipping any of them can cost you the success of your product. - -With strong mentorship and direction, aspiring UX designers can move faster and more confidently in their careers. - -## ✅ Final Verdict - -If you’ve made it this far—thank you! 🙌 -I hope this guide helped you better understand how to **streamline your UX design process**. -If you found value in it, please share it with your fellow designers and friends. - -📩 Contact: **sowmiyavenkatesan611@gmail.com** f - -Happy Designing! 🎨 - \ No newline at end of file diff --git a/src/components/blogCarousel/blogCard.tsx b/src/components/blogCarousel/blogCard.tsx index e1ba5c96..cc3fac22 100644 --- a/src/components/blogCarousel/blogCard.tsx +++ b/src/components/blogCarousel/blogCard.tsx @@ -4,6 +4,7 @@ import { useState } from "react"; import { motion } from "framer-motion"; import Link from "@docusaurus/Link"; import { Card, CardContent } from "../ui/card"; +import { getAuthorNames } from "../../utils/authors"; const BlogCard = ({ type, @@ -11,70 +12,76 @@ const BlogCard = ({ title, content, imageUrl, - id + id, + authors }) => { - const [currentUrl] = useState( - typeof window !== "undefined" ? 
window.location.href : "" - ); + const [isHovered, setIsHovered] = useState(false); if (!id || !type) { return
data not fetched properly, imageId and entryId not found
; } + // Get category from title for demo purposes + const getCategory = (title) => { + if (title.toLowerCase().includes('design') || title.toLowerCase().includes('ux')) return 'Design'; + if (title.toLowerCase().includes('ai') || title.toLowerCase().includes('deepmind')) return 'AI & Tech'; + if (title.toLowerCase().includes('github') || title.toLowerCase().includes('git')) return 'Development'; + return 'Resources'; + }; + + const category = getCategory(title); + return ( - setIsHovered(true)} - onMouseLeave={() => setIsHovered(false)} - className="relative overflow-hidden h-full shadow-2xl border border-gray-200 dark:border-gray-700 rounded-2xl transition-all duration-300" + setIsHovered(true)} + onMouseLeave={() => setIsHovered(false)} + className="relative overflow-hidden h-full transition-all duration-300" + > + - - {/* Hover shimmer effect */} - - - - - -
- {date} -
-

- {title} -

-
- {content} +
+ {/* Category Badge */} +
{category}
+ + {/* Card Image */} +
+ {title} +
+ + {/* Card Content */} +
+

{title}

+

{content}

+ + {/* Card Meta */} +
+
+ 👤 + {getAuthorNames(authors || [])}
- - - - + 5 min read +
+ + {/* Read More Button */} +
+ Read Article → +
+
+
+ + ); }; diff --git a/src/components/blogCarousel/blogCarousel.css b/src/components/blogCarousel/blogCarousel.css new file mode 100644 index 00000000..65cab5c2 --- /dev/null +++ b/src/components/blogCarousel/blogCarousel.css @@ -0,0 +1,420 @@ +/* Blog Carousel Card Styles - Matching Blog Page Design */ + +/* Article Card */ +.article-card { + background: white; + border-radius: 20px; + overflow: hidden; + box-shadow: + 0 4px 20px rgba(0, 0, 0, 0.08), + 0 1px 3px rgba(0, 0, 0, 0.1); + transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1); + position: relative; + border: 1px solid rgba(255, 255, 255, 0.8); + height: 100%; + display: flex; + flex-direction: column; +} + +[data-theme='dark'] .article-card { + background: #242526; + border: 1px solid rgba(74, 85, 104, 0.3); + box-shadow: + 0 4px 20px rgba(0, 0, 0, 0.4), + 0 1px 3px rgba(0, 0, 0, 0.3); +} + +.article-card::before { + content: ''; + position: absolute; + top: 0; + left: 0; + right: 0; + bottom: 0; + background: linear-gradient(135deg, rgba(99, 102, 241, 0.02) 0%, rgba(168, 85, 247, 0.02) 100%); + opacity: 0; + transition: opacity 0.3s ease; + pointer-events: none; +} + +.article-card:hover::before { + opacity: 1; +} + +.card-category { + position: absolute; + top: 20px; + left: 20px; + background: linear-gradient(135deg, rgba(255, 255, 255, 0.95) 0%, rgba(255, 255, 255, 0.9) 100%); + color: #6366f1; + padding: 8px 16px; + border-radius: 25px; + font-size: 12px; + font-weight: 700; + z-index: 2; + backdrop-filter: blur(20px); + border: 1px solid rgba(99, 102, 241, 0.1); + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1); + transition: all 0.2s ease; + text-transform: uppercase; + letter-spacing: 0.5px; +} + +[data-theme='dark'] .card-category { + background: linear-gradient(135deg, rgba(36, 37, 38, 0.95) 0%, rgba(26, 32, 44, 0.9) 100%); + color: #a78bfa; + border: 1px solid rgba(167, 139, 250, 0.2); + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.3); +} + +.article-card:hover .card-category { + background: 
linear-gradient(135deg, #6366f1 0%, #8b5cf6 100%); + color: white; + transform: scale(1.05); +} + +[data-theme='dark'] .article-card:hover .card-category { + background: linear-gradient(135deg, #a78bfa 0%, #c084fc 100%); + color: #1a202c; +} + +.card-image { + height: 200px; + overflow: hidden; + position: relative; + background: linear-gradient(135deg, #f1f5f9 0%, #e2e8f0 100%); + flex-shrink: 0; +} + +.card-image::before { + content: ''; + position: absolute; + top: 0; + left: 0; + right: 0; + bottom: 0; + background: linear-gradient(135deg, rgba(99, 102, 241, 0.1) 0%, rgba(168, 85, 247, 0.1) 100%); + opacity: 0; + transition: opacity 0.3s ease; + z-index: 1; +} + +.article-card:hover .card-image::before { + opacity: 1; +} + +.card-image img { + width: 100%; + height: 100%; + object-fit: cover; + transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1); + filter: brightness(1) saturate(1); +} + +.article-card:hover .card-image img { + transform: scale(1.08); + filter: brightness(1.1) saturate(1.2); +} + +.card-content { + padding: 24px; + position: relative; + background: linear-gradient(135deg, rgba(255, 255, 255, 0.95) 0%, rgba(248, 250, 252, 0.95) 100%); + flex: 1; + display: flex; + flex-direction: column; +} + +[data-theme='dark'] .card-content { + background: linear-gradient(135deg, rgba(36, 37, 38, 0.95) 0%, rgba(26, 32, 44, 0.95) 100%); +} + +.card-title { + font-size: 18px; + font-weight: 700; + color: #1e293b; + margin: 0 0 12px 0; + line-height: 1.3; + letter-spacing: -0.01em; + transition: color 0.2s ease; + display: -webkit-box; + -webkit-line-clamp: 2; + -webkit-box-orient: vertical; + overflow: hidden; +} + +.article-card:hover .card-title { + color: #6366f1; +} + +[data-theme='dark'] .card-title { + color: #f1f5f9; +} + +[data-theme='dark'] .article-card:hover .card-title { + color: #a78bfa; +} + +.card-description { + font-size: 14px; + color: #64748b; + line-height: 1.6; + margin: 0 0 20px 0; + display: -webkit-box; + -webkit-line-clamp: 3; + 
-webkit-box-orient: vertical; + overflow: hidden; + font-weight: 400; + flex: 1; +} + +[data-theme='dark'] .card-description { + color: #94a3b8; +} + +.card-meta { + display: flex; + align-items: center; + justify-content: space-between; + margin-bottom: 16px; + padding-bottom: 16px; + border-bottom: 1px solid rgba(226, 232, 240, 0.8); + position: relative; + gap: 12px; +} + +[data-theme='dark'] .card-meta { + border-bottom-color: rgba(74, 85, 104, 0.3); +} + +.card-meta::before { + content: ''; + position: absolute; + bottom: -1px; + left: 0; + width: 40px; + height: 2px; + background: linear-gradient(135deg, #6366f1 0%, #8b5cf6 100%); + border-radius: 1px; + opacity: 0; + transition: all 0.3s ease; +} + +.article-card:hover .card-meta::before { + opacity: 1; + width: 60px; +} + +.card-author { + display: flex; + align-items: center; + gap: 8px; + flex: 1; + min-width: 0; +} + +.author-avatar { + width: 28px; + height: 28px; + border-radius: 50%; + background: linear-gradient(135deg, #6366f1 0%, #8b5cf6 100%); + display: flex; + align-items: center; + justify-content: center; + font-size: 14px; + color: white; + font-weight: 600; + box-shadow: 0 2px 8px rgba(99, 102, 241, 0.3); + flex-shrink: 0; + transition: all 0.3s ease; +} + +.author-avatar:hover { + transform: scale(1.1); + box-shadow: 0 4px 12px rgba(99, 102, 241, 0.4); +} + +.author-name { + font-size: 12px; + color: #475569; + font-weight: 600; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + transition: color 0.2s ease; +} + +.author-name:hover { + color: #6366f1; +} + +[data-theme='dark'] .author-name { + color: #cbd5e1; +} + +[data-theme='dark'] .author-name:hover { + color: #a78bfa; +} + +.card-read-time { + font-size: 11px; + color: #94a3b8; + font-weight: 500; + background: rgba(148, 163, 184, 0.1); + padding: 4px 8px; + border-radius: 12px; + flex-shrink: 0; +} + +[data-theme='dark'] .card-read-time { + color: #64748b; + background: rgba(100, 116, 139, 0.2); +} + 
+.card-read-more { + display: inline-flex; + align-items: center; + gap: 8px; + color: #6366f1; + text-decoration: none; + font-size: 13px; + font-weight: 700; + padding: 10px 16px; + border-radius: 12px; + background: linear-gradient(135deg, rgba(99, 102, 241, 0.1) 0%, rgba(139, 92, 246, 0.1) 100%); + border: 1px solid rgba(99, 102, 241, 0.2); + transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1); + position: relative; + overflow: hidden; + margin-top: auto; +} + +.card-read-more::before { + content: ''; + position: absolute; + top: 0; + left: -100%; + width: 100%; + height: 100%; + background: linear-gradient(135deg, #6366f1 0%, #8b5cf6 100%); + transition: left 0.3s ease; + z-index: -1; +} + +.card-read-more:hover { + color: white; + transform: translateY(-2px); + box-shadow: 0 8px 25px rgba(99, 102, 241, 0.3); +} + +.card-read-more:hover::before { + left: 0; +} + +[data-theme='dark'] .card-read-more { + color: #a78bfa; + background: linear-gradient(135deg, rgba(167, 139, 250, 0.1) 0%, rgba(192, 132, 252, 0.1) 100%); + border-color: rgba(167, 139, 250, 0.2); +} + +[data-theme='dark'] .card-read-more:hover { + color: white; +} + +/* Responsive adjustments for carousel cards */ +@media (max-width: 768px) { + .card-image { + height: 160px; + } + + .card-content { + padding: 20px; + } + + .card-title { + font-size: 16px; + margin-bottom: 10px; + } + + .card-description { + font-size: 13px; + margin-bottom: 16px; + -webkit-line-clamp: 2; + } + + .card-meta { + margin-bottom: 12px; + padding-bottom: 12px; + } + + .author-avatar { + width: 24px; + height: 24px; + font-size: 12px; + } + + .author-name { + font-size: 11px; + } + + .card-read-time { + font-size: 10px; + padding: 3px 6px; + } + + .card-read-more { + font-size: 12px; + padding: 8px 12px; + } +} + +/* Smooth page transitions */ +.page-transition-enter { + opacity: 0; + transform: translateY(20px); +} + +.page-transition-enter-active { + opacity: 1; + transform: translateY(0); + transition: opacity 0.6s 
ease-out, transform 0.6s ease-out; +} + +.page-transition-exit { + opacity: 1; + transform: translateY(0); +} + +.page-transition-exit-active { + opacity: 0; + transform: translateY(-20px); + transition: opacity 0.4s ease-in, transform 0.4s ease-in; +} + +/* Blog Carousel Section Enhancements */ +.blog-carousel-section { + background: linear-gradient(135deg, #f8f9ff 0%, #f0f2ff 50%, #e8ebff 100%); + padding: 60px 0; + position: relative; + overflow: hidden; +} + +[data-theme='dark'] .blog-carousel-section { + background: linear-gradient(135deg, #1b1b1d 0%, #121212 50%, #1a202c 100%); +} + +/* Enhanced Focus States */ +.article-card:focus-within { + outline: 2px solid #6366f1; + outline-offset: 4px; + border-radius: 24px; +} + +/* Smooth Link Transitions */ +a[href*="/blog/"] { + transition: all 0.3s ease; +} + +a[href*="/blog/"]:hover { + text-decoration: none; +} \ No newline at end of file diff --git a/src/components/blogCarousel/blogCarousel.tsx b/src/components/blogCarousel/blogCarousel.tsx index 352b151d..b4061bbd 100644 --- a/src/components/blogCarousel/blogCarousel.tsx +++ b/src/components/blogCarousel/blogCarousel.tsx @@ -14,6 +14,7 @@ import { useEffect, useState } from "react"; import BlogCard from "./blogCard"; import blogs from "../../database/blogs"; import Autoplay from "embla-carousel-autoplay"; +import "./blogCarousel.css"; export function BlogCarousel() { const [api, setApi] = useState(); @@ -33,6 +34,7 @@ export function BlogCarousel() { return (
+ - {blogs.map((blog) => ( - + {blogs.map((blog, index) => ( + ))} - {/* Carousel controls */} -
- + {/* Enhanced Carousel controls */} +
+
{Array.from({ length: count }).map((_, index) => (
- +
diff --git a/src/css/custom.css b/src/css/custom.css index 0c32b22d..13f8da4e 100644 --- a/src/css/custom.css +++ b/src/css/custom.css @@ -1225,3 +1225,60 @@ html { color: white; } + +/* ===== SECTION 14: SMOOTH PAGE TRANSITIONS ===== */ +/* Global page transition styles */ +main { + transition: opacity 0.6s ease-out, transform 0.6s ease-out; +} + +/* Blog carousel section background */ +.blog-carousel-section { + background: linear-gradient(135deg, #f8f9ff 0%, #f0f2ff 50%, #e8ebff 100%); + position: relative; + overflow: hidden; +} + +[data-theme='dark'] .blog-carousel-section { + background: linear-gradient(135deg, #1b1b1d 0%, #121212 50%, #1a202c 100%); +} + +.blog-carousel-section::before { + content: ''; + position: absolute; + top: 0; + left: 0; + right: 0; + bottom: 0; + background: + radial-gradient(circle at 20% 30%, rgba(99, 102, 241, 0.08) 0%, transparent 40%), + radial-gradient(circle at 80% 70%, rgba(168, 85, 247, 0.06) 0%, transparent 45%); + pointer-events: none; +} + +/* Enhanced link transitions */ +a { + transition: all 0.2s ease; +} + +/* Smooth hover effects for cards */ +.article-card, +.blog-card { + transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1); +} + +/* Page load animation */ +@keyframes pageSlideIn { + 0% { + opacity: 0; + transform: translateY(20px); + } + 100% { + opacity: 1; + transform: translateY(0); + } +} + +.page-enter { + animation: pageSlideIn 0.6s ease-out; +} diff --git a/src/database/blogs/index.tsx b/src/database/blogs/index.tsx index c774ceb3..8c87fa07 100644 --- a/src/database/blogs/index.tsx +++ b/src/database/blogs/index.tsx @@ -4,6 +4,7 @@ interface Blog { image: string; description: string; slug: string; + authors: string[]; } const blogs: Blog[] = [ @@ -14,6 +15,7 @@ const blogs: Blog[] = [ description: "User experience design can be overwhelming because of the number of factors that influence what a product should look like and how it should function.", slug: "streamline-ux-ui", + authors: 
["dharshibalasubramaniyam", "sanjay-kv"], }, { @@ -23,6 +25,7 @@ const blogs: Blog[] = [ description: " Are you passionate about design and dreaming of a career in it? Or maybe you are already in the design space and looking to pivot into UI/UX? ", slug: "ux-ui-design-job", + authors: ["dharshibalasubramaniyam", "sanjay-kv"], }, { id: 3, @@ -31,6 +34,7 @@ const blogs: Blog[] = [ description: "The impact of technology on UX design is undeniable. Automation and artificial intelligence are making it easier to identify user needs and create tailored experiences.", slug: "ux-designers-ai", + authors: ["dharshibalasubramaniyam", "sanjay-kv"], }, { id: 4, @@ -39,14 +43,16 @@ const blogs: Blog[] = [ description: "DeepMind is an auxiliary of Google that centers around man-made brainpower. It utilizes a part of AI called AI", slug: "google-deepmind", + authors: ["dharshibalasubramaniyam", "sanjay-kv"], }, { id: 5, title: "What are backlinks for SEO", image: "/img/blogs/01-seo-image.png", description: - "An SEO backlink is created when one website links to another, and they’re extremely important for SEO. ", + "An SEO backlink is created when one website links to another, and they're extremely important for SEO. 
", slug: "google-backlinks", + authors: ["sanjay-kv"], }, { @@ -56,15 +62,9 @@ const blogs: Blog[] = [ description: "The GitHub Copilot Coding Agent is an asynchronous software engineering agent that assists developers by suggesting code snippets", slug: "git-coding-agent", + authors: ["sanjay-kv"], }, - { - id: 7, - title: "Apache Spark Tutorial", - image: "/img/blogs/07-spark-blog-banner.png", - description: - "Apache Spark is an open-source unified analytics engine for large-scale data processing, with built-in modules for streaming, SQL, machine learning, and graph processing.", - slug: "spark-architecture", - }, + ]; -export default blogs; +export default blogs; \ No newline at end of file diff --git a/src/pages/index.tsx b/src/pages/index.tsx index 99e4793e..fab42a62 100644 --- a/src/pages/index.tsx +++ b/src/pages/index.tsx @@ -1,4 +1,4 @@ -import React from "react"; +import React, { useEffect } from "react"; import useDocusaurusContext from "@docusaurus/useDocusaurusContext"; import Layout from "@theme/Layout"; import type { ReactNode } from "react"; @@ -18,6 +18,23 @@ import FAQs from "../components/faqs/faqs"; export default function Home(): ReactNode { const { siteConfig } = useDocusaurusContext(); + + useEffect(() => { + // Add page transition animation on mount + const mainElement = document.querySelector('main'); + if (mainElement) { + mainElement.style.opacity = '0'; + mainElement.style.transform = 'translateY(20px)'; + mainElement.style.transition = 'opacity 0.6s ease-out, transform 0.6s ease-out'; + + // Trigger animation after a brief delay + setTimeout(() => { + mainElement.style.opacity = '1'; + mainElement.style.transform = 'translateY(0)'; + }, 100); + } + }, []); + return ( {/* ✅ Wrap in solid background to fix light mode */} -
-
+
+
-
+
diff --git a/src/utils/authors.ts b/src/utils/authors.ts new file mode 100644 index 00000000..a7e97355 --- /dev/null +++ b/src/utils/authors.ts @@ -0,0 +1,22 @@ +export const authorsMap: Record<string, string> = { + "ajay-dhangar": "Ajay Dhangar", + "sanjay-kv": "Sanjay Viswanathan", + "hitesh-gahanolia": "Hitesh Gahanolia", + "dharshibalasubramaniyam": "Sowmiya Venketashan", + "abhijith-m-s": "Abhijith M S", + "khushi-kalra": "Khushi Kalra", + "nayanikamukherjee": "Nayanika Mukherjee", + "pujan-sarkar": "Pujan Sarkar", + "mohitmuktikant": "Mohit Muktikant", + "santhosh-siddhardha": "Lingamuneni Santhosh Siddhardha", + "akshitha-chiluka": "Akshitha Chiluka", + "Aditya-Singh-Rathore": "Aditya Singh Rathore", +}; + +export const getAuthorNames = (authorIds: string[]): string => { + const firstNames = authorIds.map(id => { + const fullName = authorsMap[id] || id; + return fullName.split(' ')[0]; + }).slice(0, 2); + return firstNames.length > 1 ? firstNames.join(" & ") : firstNames[0] || "RecodeHive"; +};
\ No newline at end of file