From 41693bfeecfd82ab25f5e19c286b8cc8d53544ce Mon Sep 17 00:00:00 2001 From: Mohammad Naim Faizy Date: Thu, 9 Apr 2026 13:13:09 +1000 Subject: [PATCH 1/3] feat: add shared module quiz system --- .github/agents/module-quiz-generator.agent.md | 79 ++ README.md | 44 +- docs/Project-Layout-and-Structure.md | 33 +- docs/Quiz-System-Guide.md | 142 +++ quiz-banks/ai.quiz.json | 798 ++++++++++++++++ quiz-banks/auth.quiz.json | 861 +++++++++++++++++ quiz-banks/bigo.quiz.json | 841 +++++++++++++++++ quiz-banks/datastructures.quiz.json | 530 +++++++++++ quiz-banks/devops.quiz.json | 833 +++++++++++++++++ quiz-banks/git.quiz.json | 861 +++++++++++++++++ quiz-banks/javascript.quiz.json | 555 +++++++++++ quiz-banks/nextjs.quiz.json | 861 +++++++++++++++++ quiz-banks/nodejs.quiz.json | 864 +++++++++++++++++ quiz-banks/python.quiz.json | 827 +++++++++++++++++ quiz-banks/react.quiz.json | 844 +++++++++++++++++ quiz-banks/rxjs.quiz.json | 854 +++++++++++++++++ quiz-banks/systemdesign.quiz.json | 855 +++++++++++++++++ quiz-banks/typescript.quiz.json | 876 ++++++++++++++++++ src/components/Sidebar.tsx | 68 +- src/features/ai/AIFundamentalsPage.tsx | 3 + src/features/auth/AuthPage.tsx | 3 + src/features/bigo/BigOPage.tsx | 4 + .../datastructures/DataStructuresPage.tsx | 4 + src/features/devops/DevOpsPage.tsx | 3 + src/features/git/GitPage.tsx | 4 + src/features/javascript/JavaScriptPage.tsx | 3 + src/features/nextjs/NextjsPage.tsx | 3 + src/features/nodejs/NodeJSPage.tsx | 3 + src/features/python/PythonPage.tsx | 4 + src/features/react/ReactPage.tsx | 4 + src/features/rxjs/RxJSPage.tsx | 3 + .../systemdesign/SystemDesignPage.tsx | 7 +- src/features/typescript/TypeScriptPage.tsx | 4 + .../components/quiz/ModuleQuizSection.tsx | 545 +++++++++++ .../components/quiz/QuestionRenderer.tsx | 279 ++++++ src/shared/constants/moduleNavigation.ts | 472 ++++++++++ src/shared/hooks/useQuizSession.ts | 341 +++++++ src/types/quiz.ts | 102 ++ src/utils/quiz.ts | 321 +++++++ 39 files changed, 13686 
insertions(+), 52 deletions(-) create mode 100644 .github/agents/module-quiz-generator.agent.md create mode 100644 docs/Quiz-System-Guide.md create mode 100644 quiz-banks/ai.quiz.json create mode 100644 quiz-banks/auth.quiz.json create mode 100644 quiz-banks/bigo.quiz.json create mode 100644 quiz-banks/datastructures.quiz.json create mode 100644 quiz-banks/devops.quiz.json create mode 100644 quiz-banks/git.quiz.json create mode 100644 quiz-banks/javascript.quiz.json create mode 100644 quiz-banks/nextjs.quiz.json create mode 100644 quiz-banks/nodejs.quiz.json create mode 100644 quiz-banks/python.quiz.json create mode 100644 quiz-banks/react.quiz.json create mode 100644 quiz-banks/rxjs.quiz.json create mode 100644 quiz-banks/systemdesign.quiz.json create mode 100644 quiz-banks/typescript.quiz.json create mode 100644 src/shared/components/quiz/ModuleQuizSection.tsx create mode 100644 src/shared/components/quiz/QuestionRenderer.tsx create mode 100644 src/shared/constants/moduleNavigation.ts create mode 100644 src/shared/hooks/useQuizSession.ts create mode 100644 src/types/quiz.ts create mode 100644 src/utils/quiz.ts diff --git a/.github/agents/module-quiz-generator.agent.md b/.github/agents/module-quiz-generator.agent.md new file mode 100644 index 0000000..fb7065c --- /dev/null +++ b/.github/agents/module-quiz-generator.agent.md @@ -0,0 +1,79 @@ +--- +name: "Module Quiz Generator" +description: "Use when generating, expanding, or updating module quiz banks in quiz-banks/{module}.quiz.json, especially for scenario-based questions, duplicate detection, and answer/reference deduplication." +tools: [read, search, edit] +user-invocable: true +agents: [] +--- + +You are a specialist at maintaining module quiz banks for Code Executives. + +## Scope +- Your primary target is the root-level file `quiz-banks/{module}.quiz.json`. +- You may read module content from `src/features/{module}/**` and shared metadata from `src/shared/constants/moduleNavigation.ts`. 
+- You should not modify runtime code unless the parent task explicitly asks for schema changes. + +## Non-Negotiable Rules +- Always read the entire existing quiz bank before proposing any update. +- Never create a duplicate question. +- Never create a duplicate answer set for a near-identical scenario. +- Preserve existing ids for unchanged questions. +- When appending new questions, continue the sequence with the next module-specific id, for example `javascript-q32` after `javascript-q31`. +- Use only valid section labels that already exist for that module in `src/shared/constants/moduleNavigation.ts`. +- Keep questions scenario-based and non-trivial. +- Maintain the repository quiz-bank schema exactly. + +## Required Bank Schema +Each bank must remain a single JSON object with: +- `moduleId` +- `moduleTitle` +- `description` +- `version` +- `timeLimitMinutes` +- `questionsPerAttempt` +- `questions` + +Each question must include: +- `id` +- `type` +- `difficulty` +- `scenario` +- `prompt` +- `explanation` +- `tags` +- `references` + +Type-specific fields: +- `single-choice` and `true-false`: `options`, `correctAnswer:number` +- `multi-select`: `options`, `correctAnswer:number[]` +- `ordering`: `items`, `correctAnswer:string[]` +- `matching`: `premises`, `responses`, `correctAnswer:number[]` + +## Update Workflow +1. Identify the module and read the module section files that define the learning content. +2. Read `quiz-banks/{module}.quiz.json` in full if it exists. +3. Build a normalized dedupe key for each existing question using: + - normalized `prompt` + - normalized `scenario` + - `type` + - normalized correct-answer payload + - normalized reference label set +4. Generate only scenario-based questions that are meaningfully distinct from the existing bank. +5. Reject any generated question that collides with an existing dedupe key or substantially repeats the same answer pattern for the same concept. +6. 
Validate that every reference label exists for the module. +7. Validate that the final JSON remains well-formed and schema-complete. + +## Quality Bar +- Prefer advanced and expert questions over recall-only prompts. +- Use operational, debugging, architecture, performance, or incident-response scenarios. +- Include multiple question types where appropriate instead of only single-choice. +- Explanations must explain why the best answer is correct, not just restate it. + +## Output Format +Return a short summary with: +- module name +- file touched +- questions added +- questions updated +- duplicates skipped +- highest question id after the update \ No newline at end of file diff --git a/README.md b/README.md index 8ac9df0..4394b65 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ **🌐 Live Website**: [https://codexecutives.com](https://codexecutives.com) -**✨ Now featuring 14 complete learning modules with 90+ interactive visualizations covering Git, JavaScript Engine, RxJS, Data Structures, Next.js, Big-O Notation, Python Programming, AI Fundamentals, Node.js Ecosystem, DevOps & Cloud Computing, Auth & Security and a LeetCode-style playground with advanced debugging and gamification.** +**✨ Now featuring 14 complete learning modules with 90+ interactive visualizations, root-level scenario quiz banks for every module, shared timed quizzes with local progress tracking, and a LeetCode-style playground with advanced debugging and gamification.** > **📌 Repository Maintainers**: See [REPOSITORY-ABOUT-QUICK-REFERENCE.md](./docs/REPOSITORY-ABOUT-QUICK-REFERENCE.md) for GitHub repository About section configuration (description, website, and topics). 
@@ -23,6 +23,7 @@ - **Node.js Ecosystem**: Deep dive into Event Loop, V8 memory, Streams, Clustering, module systems, package managers, frameworks, and runtime wars (Node vs Deno vs Bun) - **DevOps & Cloud Computing**: CI/CD pipelines, cloud service models (IaaS/PaaS/SaaS/FaaS), container orchestration with Kubernetes, Infrastructure as Code, observability, and modern DevOps roles - **Auth & Security**: Modern authentication and authorization — OAuth 2.0, OIDC, PKCE, WebAuthn/Passkeys, Zero Trust, BFF Pattern, and AI agent authentication +- **Shared Timed Module Quizzes**: Every learning module ends with a 5-question, 10-minute scenario quiz backed by a root-level JSON bank and localStorage result history - **LeetCode-Style Playground**: Interactive coding environment with debugging, visualizations, and gamification ### 🎮 **Interactive Visualizations** @@ -47,6 +48,16 @@ - **Keyboard Shortcuts**: F10 (step), F5 (continue/pause), F11 (reset), Space (pause/resume) - **Rate-Limited Output**: Console entries capped at 500, oversized strings truncated to 50 KB +### 🧠 **Timed Module Quizzes** + +- **Shared Quiz Runtime**: One reusable quiz system powers every learning module while preserving module-specific styling and references +- **Root-Level Quiz Banks**: Each module has a dedicated JSON file in `quiz-banks/{module}.quiz.json` +- **Scenario-Based Questions**: Every bank contains 30+ harder prompts with single-choice, multi-select, true/false, ordering, and matching formats +- **10-Minute Timer**: Each run samples 5 questions and persists the active attempt across refreshes +- **Local Progress History**: Scores, tiers, and recent attempts are stored in localStorage on the learner's machine +- **Wrong-Answer References**: Review mode links learners back to the exact module sections that support the missed concept +- **Authoring Workflow**: The workspace agent `.github/agents/module-quiz-generator.agent.md` maintains quiz banks without duplicating questions or answer 
sets + ### 🎨 **Modern User Experience** - **Responsive Design**: Works seamlessly on desktop, tablet, and mobile @@ -78,6 +89,7 @@ ## 📁 Project Structure ``` +quiz-banks/ # Root-level module quiz banks named {module}.quiz.json src/ ├── components/ # Reusable UI components │ ├── models2d/ # 2D visualization components @@ -118,21 +130,49 @@ src/ │ ├── bigo/ # Big-O notation concepts (8 sections) │ └── python/ # Python programming concepts (5 sections) ├── hooks/ # Custom React hooks +├── shared/ +│ ├── components/ +│ │ └── quiz/ # Shared timed quiz UI and answer renderers +│ ├── constants/ +│ │ └── moduleNavigation.ts # Shared module navigation and reference metadata +│ └── hooks/ +│ └── useQuizSession.ts # Timed quiz session and localStorage persistence ├── types/ # TypeScript type definitions │ ├── nextjs.ts # Next.js type definitions │ ├── datastructures.ts # Data structures type definitions │ ├── bigo.ts # Big-O notation type definitions │ ├── python.ts # Python programming type definitions -│ └── playground.ts # Playground type definitions +│ ├── playground.ts # Playground type definitions +│ └── quiz.ts # Shared quiz schema and result types ├── utils/ # Utility functions │ ├── instrument.ts # Code instrumentation for debugging │ ├── memoryMonitor.ts # Performance monitoring +│ ├── quiz.ts # Quiz loading, scoring, and answer utilities │ └── theme.ts # Theme and styling utilities ├── data/ # Static data and problem sets │ └── problems.ts # LeetCode-style coding problems └── three/ # Three.js 3D models and scenes ``` +### Quiz Bank Naming + +- `quiz-banks/javascript.quiz.json` +- `quiz-banks/rxjs.quiz.json` +- `quiz-banks/git.quiz.json` +- `quiz-banks/datastructures.quiz.json` +- `quiz-banks/react.quiz.json` +- `quiz-banks/nextjs.quiz.json` +- `quiz-banks/bigo.quiz.json` +- `quiz-banks/python.quiz.json` +- `quiz-banks/systemdesign.quiz.json` +- `quiz-banks/typescript.quiz.json` +- `quiz-banks/ai.quiz.json` +- `quiz-banks/nodejs.quiz.json` +- 
`quiz-banks/devops.quiz.json` +- `quiz-banks/auth.quiz.json` + +See `docs/Quiz-System-Guide.md` for the JSON schema, localStorage behavior, and the quiz-bank authoring workflow. + ## 🚀 Quick Start ### Prerequisites diff --git a/docs/Project-Layout-and-Structure.md b/docs/Project-Layout-and-Structure.md index 2d17a0e..2398093 100644 --- a/docs/Project-Layout-and-Structure.md +++ b/docs/Project-Layout-and-Structure.md @@ -12,6 +12,7 @@ This document describes the layout and structure of the Code Executives web appl - **Sidebar**: Collapsible drawer, context-sensitive. Shows JavaScript theory sections only on the JavaScript page. - **Footer**: Simple footer with copyright and About link. - **Main Content**: Renders the current page based on the route. +- **Timed Module Quiz Layer**: Shared end-of-module quiz section rendered inside every learning module, using query-param section routing and root-level quiz banks. --- @@ -22,6 +23,7 @@ This document describes the layout and structure of the Code Executives web appl - `/` → Home - `/about` → About - `/javascript` → JavaScript page (with tabs and sidebar sections) +- Learning modules keep their existing route paths and append the shared quiz through `?section=Quiz` instead of adding separate routes. --- @@ -40,6 +42,9 @@ This document describes the layout and structure of the Code Executives web appl - `Header.tsx`: AppBar with navigation and sidebar toggle. - `Sidebar.tsx`: Drawer with context-sensitive section links. - `Footer.tsx`: Footer bar. +- `/src/shared/components/quiz/` + - `ModuleQuizSection.tsx`: Shared quiz shell used across all learning modules. + - `QuestionRenderer.tsx`: Shared renderer for single-choice, multi-select, true/false, ordering, and matching questions. - `/src/pages/` - `Home.tsx`: Home page. - `About.tsx`: About page. @@ -47,6 +52,16 @@ This document describes the layout and structure of the Code Executives web appl - `JavaScript2D.tsx`, `JavaScript3D.tsx`: Visualization canvases. 
- `/src/sections/` - Individual theory sub-section components (Introduction, EngineRuntime, etc.) +- `/src/shared/hooks/` + - `useQuizSession.ts`: Loads root-level quiz banks, persists active attempts, and stores local quiz results in localStorage. +- `/src/shared/constants/` + - `moduleNavigation.ts`: Shared module section metadata used for quiz references and navigation. +- `/src/types/quiz.ts` + - Shared quiz schema for bank files, active attempts, and result summaries. +- `/quiz-banks/` + - Root-level JSON quiz banks named `{module}.quiz.json` so agents and humans can discover them quickly. +- `/.github/agents/module-quiz-generator.agent.md` + - Workspace agent for growing or updating module quiz banks without duplicating questions or answer sets. --- @@ -55,6 +70,20 @@ This document describes the layout and structure of the Code Executives web appl - Sidebar is hidden by default and toggled via the header button. - Sidebar sections are shown only for the JavaScript page. - Clicking a sidebar section updates the query param and displays the corresponding theory component. +- The quiz section is appended as the final sidebar entry for every learning module. +- Wrong-answer review links resolve through the shared module navigation metadata so quiz references land on the correct module section. + +--- + +## Quiz System + +- Every learning module now ends with a shared quiz experience. +- Each quiz run samples 5 questions from a 30+ question module bank. +- The timer is fixed at 10 minutes and survives refresh via localStorage. +- Results are stored locally and exposed back to the learner in the module quiz sidebar. +- Wrong answers link back to the relevant in-module sections for review. +- Quiz content lives outside `src/` in the root-level `quiz-banks/` directory to keep authoring and agent discovery simple. +- The `module-quiz-generator` custom agent is the canonical workflow for expanding or refreshing module quiz banks. 
--- @@ -69,8 +98,8 @@ This document describes the layout and structure of the Code Executives web appl - Add more languages and context-sensitive sidebar sections. - Implement code editor and interactive demos for each mode. -- Expand documentation as new features are added. +- Expand the quiz-bank authoring guide as the generator workflow evolves. --- -_Last updated: September 15, 2025_ +_Last updated: April 9, 2026_ diff --git a/docs/Quiz-System-Guide.md b/docs/Quiz-System-Guide.md new file mode 100644 index 0000000..a5d23e2 --- /dev/null +++ b/docs/Quiz-System-Guide.md @@ -0,0 +1,142 @@ +# Quiz System Guide + +## Overview + +Code Executives now includes a shared timed quiz system for every learning module except the Playground. + +- Each attempt uses 5 questions. +- Each attempt has a 10-minute timer. +- Every module bank contains 30+ scenario-based questions. +- Attempts and results are stored in localStorage so the learner can resume an in-progress quiz after a refresh. +- Wrong answers include deep links back into the relevant module sections. + +## Bank Location + +Quiz banks live at the repository root in `quiz-banks/`. 
+ +Naming convention: + +- `quiz-banks/javascript.quiz.json` +- `quiz-banks/rxjs.quiz.json` +- `quiz-banks/git.quiz.json` +- `quiz-banks/datastructures.quiz.json` +- `quiz-banks/react.quiz.json` +- `quiz-banks/nextjs.quiz.json` +- `quiz-banks/bigo.quiz.json` +- `quiz-banks/python.quiz.json` +- `quiz-banks/systemdesign.quiz.json` +- `quiz-banks/typescript.quiz.json` +- `quiz-banks/ai.quiz.json` +- `quiz-banks/nodejs.quiz.json` +- `quiz-banks/devops.quiz.json` +- `quiz-banks/auth.quiz.json` + +## JSON Schema + +Every bank is a single JSON object: + +```json +{ + "moduleId": "javascript", + "moduleTitle": "JavaScript", + "description": "Scenario-based assessment...", + "version": 1, + "timeLimitMinutes": 10, + "questionsPerAttempt": 5, + "questions": [] +} +``` + +Each question must include: + +- `id` +- `type` +- `difficulty` +- `scenario` +- `prompt` +- `explanation` +- `tags` +- `references` + +Supported question types: + +- `single-choice` +- `multi-select` +- `true-false` +- `ordering` +- `matching` + +Type-specific fields: + +- `single-choice` and `true-false`: `options`, `correctAnswer:number` +- `multi-select`: `options`, `correctAnswer:number[]` +- `ordering`: `items`, `correctAnswer:string[]` +- `matching`: `premises`, `responses`, `correctAnswer:number[]` + +## Runtime Files + +- `src/shared/components/quiz/ModuleQuizSection.tsx` +- `src/shared/components/quiz/QuestionRenderer.tsx` +- `src/shared/hooks/useQuizSession.ts` +- `src/shared/constants/moduleNavigation.ts` +- `src/types/quiz.ts` +- `src/utils/quiz.ts` + +## localStorage Behavior + +Per-module storage keys use the prefix `code-executives.quiz`. + +For each module: + +- `code-executives.quiz.{module}.active-attempt` +- `code-executives.quiz.{module}.results` + +The active-attempt key stores: + +- selected question ids +- current answers +- current question index +- start timestamp +- expiry timestamp + +The results key stores recent local summaries so learners can see prior scores and tiers. 
+ +## Wrong-Answer References + +Question `references` use exact section labels from `src/shared/constants/moduleNavigation.ts`. + +At review time the runtime resolves those labels into module section links so learners can jump directly back into the source material. + +## Generator Agent + +Use the workspace custom agent: + +- `.github/agents/module-quiz-generator.agent.md` + +Its responsibilities: + +- read the full existing module bank first +- inspect the module source content +- generate only scenario-based, non-trivial questions +- avoid duplicate questions and duplicate answer sets for near-identical prompts +- preserve stable ids for unchanged questions +- append new question ids sequentially + +## Authoring Rules + +- Keep question prompts scenario-based. +- Prefer advanced and expert prompts over recall-only trivia. +- Use exact section labels already defined for the module. +- Keep explanations diagnostic and instructional. +- Do not move quiz banks into `src/` or `public/`; the root-level `quiz-banks/` directory is canonical. + +## Validation Checklist + +Before merging quiz-bank changes: + +1. Confirm the JSON file remains valid. +2. Confirm `moduleId` matches the filename. +3. Confirm there are 30+ questions. +4. Confirm at least 5 questions can be sampled without duplicate ids. +5. Confirm reference labels exist in `src/shared/constants/moduleNavigation.ts`. +6. Run `npm run build` to confirm the loader still compiles. 
\ No newline at end of file diff --git a/quiz-banks/ai.quiz.json b/quiz-banks/ai.quiz.json new file mode 100644 index 0000000..5b97f49 --- /dev/null +++ b/quiz-banks/ai.quiz.json @@ -0,0 +1,798 @@ +{ + "moduleId": "ai", + "moduleTitle": "Artificial Intelligence", + "description": "A difficult scenario-based quiz on ML systems, neural networks, embeddings, and RAG.", + "version": 1, + "timeLimitMinutes": 10, + "questionsPerAttempt": 5, + "questions": [ + { + "id": "ai-q01", + "type": "single-choice", + "difficulty": "hard", + "scenario": "An ICU team is deploying a sepsis alert where only 0.5% of encounters are positive. The current model reaches 99.4% accuracy with standard binary cross-entropy, but it misses too many true cases to be clinically useful.", + "prompt": "Which change most directly aligns training with the need to catch the rare positive cases while still producing probabilities?", + "options": [ + "Increase hidden layer width and keep the unweighted loss", + "Apply class-weighted loss or focal loss during training", + "Lower the inference threshold during preprocessing", + "Replace the binary labels with percentile ranks" + ], + "correctAnswer": 1, + "explanation": "With extreme class imbalance, standard loss is dominated by easy negatives. 
Class-weighted or focal loss increases the contribution of rare positive errors, so optimization pays more attention to recall without abandoning probabilistic outputs.", + "tags": [ + "class-imbalance", + "loss-design", + "medical-ml" + ], + "references": [ + "Loss Functions", + "Generalization" + ] + }, + { + "id": "ai-q02", + "type": "true-false", + "difficulty": "hard", + "scenario": "A demand forecasting model evaluated with a random split across 24 months looks strong offline, but it fails on a last-quarter holdout after pricing policy changes.", + "prompt": "True or false: The random-split validation score is a reliable estimate of future performance simply because it used unseen rows.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "Unseen rows are not enough when the split ignores time and policy drift. If evaluation does not mirror the production decision point, the offline score can look excellent while future performance collapses.", + "tags": [ + "temporal-split", + "distribution-shift", + "evaluation" + ], + "references": [ + "ML Lifecycle", + "Generalization" + ] + }, + { + "id": "ai-q03", + "type": "multi-select", + "difficulty": "hard", + "scenario": "A subscription churn model is built from a warehouse snapshot taken one day after some cancellation events. 
The team is deciding which features are safe to keep.", + "prompt": "Which choices introduce leakage or target contamination?", + "options": [ + "Using the last support ticket status at snapshot time when some of those tickets happened after churn", + "Using the number of failed renewals observed before the prediction cutoff", + "Using account age measured at the prediction cutoff", + "Using days since cancel request when the cancel request can occur after the intended prediction time", + "Using the last invoice amount recorded strictly before the prediction cutoff" + ], + "correctAnswer": [ + 0, + 3 + ], + "explanation": "Any feature that depends on events after the true prediction moment leaks information from the future. Pre-cutoff renewals, account age, and pre-cutoff invoice amounts are defensible; post-event support state and post-cutoff cancel signals are not.", + "tags": [ + "leakage", + "feature-selection", + "churn" + ], + "references": [ + "ML Lifecycle", + "Feature Engineering", + "Generalization" + ] + }, + { + "id": "ai-q04", + "type": "single-choice", + "difficulty": "hard", + "scenario": "A credit underwriting linear model underfits because debt-to-income ratio and a recent utilization spike matter mostly in combination, not separately.", + "prompt": "What is the best feature engineering change if the team wants the linear model to capture this pattern without switching model families?", + "options": [ + "Standardize each variable separately and retrain", + "Add an interaction feature combining debt-to-income ratio and utilization spike", + "Drop both variables to reduce noise", + "Increase the training set size without changing the inputs" + ], + "correctAnswer": 1, + "explanation": "A plain linear model cannot express multiplicative effects unless the interaction is explicitly engineered. 
Adding the interaction term lets the model represent the combined risk signal directly.", + "tags": [ + "linear-models", + "interaction-terms", + "credit-risk" + ], + "references": [ + "Feature Engineering" + ] + }, + { + "id": "ai-q05", + "type": "single-choice", + "difficulty": "hard", + "scenario": "A deep tabular network uses sigmoid activations in every hidden layer. Early in training, lower-layer gradients are nearly zero, and activations cluster near 0 or 1 after large random initialization.", + "prompt": "Which intervention most directly addresses the root cause of the stalled learning?", + "options": [ + "Use smaller variance-aware initialization and switch to a less saturating activation such as ReLU", + "Increase batch size so gradients average out", + "Remove the output-layer bias term", + "Replace the labels with a larger one-hot vector" + ], + "correctAnswer": 0, + "explanation": "Large initial weights push sigmoids into saturation, where gradients become tiny. Better initialization and non-saturating activations reduce vanishing-gradient behavior and let error signals reach earlier layers.", + "tags": [ + "vanishing-gradients", + "initialization", + "deep-learning" + ], + "references": [ + "Neural Networks", + "Backpropagation" + ] + }, + { + "id": "ai-q06", + "type": "single-choice", + "difficulty": "hard", + "scenario": "A multiclass classifier predicts probabilities [0.97, 0.02, 0.01] for an example whose true class is the second one. 
The team is debating mean squared error versus cross-entropy.", + "prompt": "Which loss will produce the stronger corrective signal for this confident mistake, and why?", + "options": [ + "Mean squared error, because squared error always grows faster near 1", + "Cross-entropy, because confidently wrong probabilities are penalized sharply through the log term", + "Either loss, because gradients are identical after softmax", + "Neither, because loss choice only changes calibration, not learning" + ], + "correctAnswer": 1, + "explanation": "Cross-entropy heavily penalizes confident mistakes, which yields a strong gradient pushing probability mass toward the correct class. That is why it is usually preferred for classification.", + "tags": [ + "cross-entropy", + "classification", + "optimization" + ], + "references": [ + "Loss Functions", + "Gradient Descent" + ] + }, + { + "id": "ai-q07", + "type": "single-choice", + "difficulty": "hard", + "scenario": "Training loss oscillates sharply between steps, although the epoch average still trends downward. The run uses a high learning rate, small mini-batches, and mildly noisy labels.", + "prompt": "What is the most defensible first adjustment if the goal is steadier optimization without changing the objective?", + "options": [ + "Lower the learning rate or use a schedule with warmup and decay", + "Increase model depth so it can absorb the noise", + "Disable data shuffling to stabilize batches", + "Evaluate only every ten epochs" + ], + "correctAnswer": 0, + "explanation": "Large step sizes combined with gradient noise often create unstable updates. 
Reducing the learning rate or introducing a schedule is the most direct way to make descent smoother.", + "tags": [ + "learning-rate", + "mini-batches", + "stability" + ], + "references": [ + "Gradient Descent" + ] + }, + { + "id": "ai-q08", + "type": "single-choice", + "difficulty": "hard", + "scenario": "A vision model stops improving after several ReLU units become permanently inactive. Gradient inspection shows those neurons output zero for almost all inputs.", + "prompt": "Which intervention most directly reduces this failure mode?", + "options": [ + "Increase weight decay", + "Use leaky ReLU or better initialization to avoid dead units", + "Convert labels to ordinal targets", + "Freeze the earlier layers" + ], + "correctAnswer": 1, + "explanation": "Dead ReLUs arise when neurons are pushed into a regime where they never activate and therefore never receive useful gradients. Leaky ReLU and better initialization both reduce that risk.", + "tags": [ + "dead-relu", + "activation-functions", + "debugging" + ], + "references": [ + "Neural Networks", + "Backpropagation" + ] + }, + { + "id": "ai-q09", + "type": "true-false", + "difficulty": "hard", + "scenario": "A semantic search team normalizes every document embedding and query embedding to unit length before retrieval.", + "prompt": "True or false: After unit normalization, ranking by dot product is equivalent to ranking by cosine similarity.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "Cosine similarity is the dot product divided by the vector norms. Once all vectors have norm 1, the two rankings are identical.", + "tags": [ + "embeddings", + "similarity", + "retrieval" + ], + "references": [ + "Word Embeddings" + ] + }, + { + "id": "ai-q10", + "type": "single-choice", + "difficulty": "hard", + "scenario": "A RAG assistant indexes PDF chunks of about 2500 tokens each. 
Relevant evidence is often buried in the middle of a chunk, and the top retrieved passages consume too much prompt budget while still missing the exact answer.", + "prompt": "Which change is most likely to improve grounding without increasing model size?", + "options": [ + "Use smaller semantically coherent chunks with overlap and rebuild the index", + "Raise generation temperature so the model explores more possibilities", + "Remove retrieval and rely on parametric memory", + "Train the generator directly on retrieved passages as labels" + ], + "correctAnswer": 0, + "explanation": "Very large chunks blur retrieval signals and waste context window space. Smaller, coherent chunks with overlap usually improve recall of the exact evidence the generator should ground on.", + "tags": [ + "chunking", + "retrieval-quality", + "context-window" + ], + "references": [ + "RAG Pipeline" + ] + }, + { + "id": "ai-q11", + "type": "multi-select", + "difficulty": "hard", + "scenario": "A product classifier reaches 99% training accuracy but only 81% validation accuracy, and many wrong validation predictions have confidence near 1.0.", + "prompt": "Which actions specifically target overfitting or miscalibration rather than simply making optimization faster?", + "options": [ + "Add regularization or reduce model capacity", + "Collect more representative data or apply stronger augmentation", + "Increase batch size only to speed throughput", + "Use early stopping based on validation behavior", + "Raise the learning rate so training reaches 100% accuracy sooner" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Regularization, stronger data coverage, and early stopping all improve generalization or reduce overconfidence. 
Increasing batch size for speed or pushing training accuracy even faster does not directly solve the validation gap.", + "tags": [ + "overfitting", + "calibration", + "validation-gap" + ], + "references": [ + "Generalization", + "ML Lifecycle" + ] + }, + { + "id": "ai-q12", + "type": "single-choice", + "difficulty": "hard", + "scenario": "An image model with batch normalization performs well during training but degrades badly in a single-request production service. Investigation shows the serving path recomputes batch statistics from the current request.", + "prompt": "What is the root issue?", + "options": [ + "The model is using training-mode batch normalization behavior during inference", + "The optimizer state was not shipped with the model", + "The labels were one-hot encoded", + "The embeddings are too low-dimensional" + ], + "correctAnswer": 0, + "explanation": "Batch normalization should use frozen running statistics at inference time. Recomputing batch stats from a single request creates unstable behavior and a clear training-serving mismatch.", + "tags": [ + "batch-normalization", + "serving", + "inference-bugs" + ], + "references": [ + "Training vs Inference", + "Neural Networks" + ] + }, + { + "id": "ai-q13", + "type": "ordering", + "difficulty": "hard", + "scenario": "A team is rebuilding a recommendation model after business goals changed and wants a sequence that will hold up in production, not just in a notebook.", + "prompt": "Place these steps in the most defensible order for an ML project that must survive deployment.", + "items": [ + "Define the prediction target and success metric", + "Split data to match the production decision point", + "Engineer features and train candidate models", + "Evaluate on held-out data and inspect failures", + "Deploy with monitoring for drift and quality" + ], + "correctAnswer": [ + "Define the prediction target and success metric", + "Split data to match the production decision point", + "Engineer features and 
train candidate models", + "Evaluate on held-out data and inspect failures", + "Deploy with monitoring for drift and quality" + ], + "explanation": "Good ML work begins with objective definition, then a production-faithful split. Training comes after data framing, evaluation comes before launch, and deployment must include monitoring because the lifecycle does not end at shipping.", + "tags": [ + "ml-lifecycle", + "ordering", + "productionization" + ], + "references": [ + "Introduction", + "ML Lifecycle", + "Feature Engineering" + ] + }, + { + "id": "ai-q14", + "type": "matching", + "difficulty": "hard", + "scenario": "You are reviewing postmortems from several failed launches and need to connect each symptom to the most likely root cause.", + "prompt": "Match each symptom to the most likely underlying problem.", + "premises": [ + "Validation AUC is high, but production AUC collapses immediately after launch", + "Training loss keeps falling while validation loss rises", + "Nearest-neighbor retrieval returns documents with overlapping keywords but the wrong meaning", + "Online latency spikes after adding a cross-encoder reranker to every retrieved chunk" + ], + "responses": [ + "Overfitting and weak generalization", + "An evaluation split that failed to mirror production or contained leakage", + "An embedding space that does not capture domain semantics well", + "A RAG ranking stage that is too expensive at inference time" + ], + "correctAnswer": [ + 1, + 0, + 2, + 3 + ], + "explanation": "Immediate failure after launch points to evaluation mismatch or leakage, not ordinary optimization noise. Rising validation loss with falling training loss is classic overfitting. 
Semantically wrong nearest neighbors implicate embeddings, and reranking every chunk is a direct latency cost in the retrieval pipeline.", + "tags": [ + "postmortem", + "root-cause-analysis", + "systems-debugging" + ], + "references": [ + "ML Lifecycle", + "Generalization", + "Word Embeddings", + "RAG Pipeline" + ] + }, + { + "id": "ai-q15", + "type": "single-choice", + "difficulty": "hard", + "scenario": "A legal-domain search system uses general-purpose embeddings trained on web text. Queries containing terms like consideration, estoppel, and constructive notice retrieve consumer advice blogs instead of case law.", + "prompt": "Which next step is most likely to improve semantic retrieval quality?", + "options": [ + "Increase top-k from 5 to 50 without changing embeddings", + "Use domain-adapted embeddings or fine-tune on legal similarity pairs", + "Sort retrieved documents alphabetically before generation", + "Decrease the vector dimension by half to remove noise" + ], + "correctAnswer": 1, + "explanation": "The failure is semantic mismatch, not just insufficient breadth. Domain-adapted embeddings or contrastive fine-tuning on legal relevance pairs are the most direct fix.", + "tags": [ + "domain-adaptation", + "legal-ai", + "semantic-search" + ], + "references": [ + "Word Embeddings", + "RAG Pipeline" + ] + }, + { + "id": "ai-q16", + "type": "true-false", + "difficulty": "hard", + "scenario": "Two models have identical validation accuracy, but Model A keeps lowering training loss for 50 extra epochs while validation loss stays flat.", + "prompt": "True or false: Model A is necessarily the better deployment choice because it optimized the training objective more completely.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "Lower training loss alone does not imply better generalization. 
If validation quality is flat, the extra optimization may offer no benefit and can even increase deployment risk through overfitting or poorer calibration.", + "tags": [ + "model-selection", + "overtraining", + "deployment" + ], + "references": [ + "Generalization", + "Training vs Inference" + ] + }, + { + "id": "ai-q17", + "type": "multi-select", + "difficulty": "hard", + "scenario": "An internal knowledge assistant sometimes fabricates policy details. Logs show that top-3 retrieval occasionally misses the current policy revision entirely.", + "prompt": "Which changes can reduce grounded-answer failures?", + "options": [ + "Add citation checking against retrieved passages before finalizing the answer", + "Improve retrieval with better chunking or stronger embeddings", + "Increase generation temperature so the model explores more possibilities", + "Use query rewriting or expansion before retrieval", + "Remove retrieved context whenever it conflicts with the model prior" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Grounded failures are best addressed by improving retrieval quality and validating that the generated answer is actually supported by the retrieved evidence. Higher temperature and ignoring retrieved context both move in the wrong direction.", + "tags": [ + "hallucination-control", + "retrieval", + "grounding" + ], + "references": [ + "RAG Pipeline" + ] + }, + { + "id": "ai-q18", + "type": "single-choice", + "difficulty": "hard", + "scenario": "Two identical models are trained on the same data. 
Full-batch gradient descent converges to a sharp solution with brittle validation behavior, while a mini-batch run is noisier but generalizes better.", + "prompt": "What is the best explanation for the stronger validation performance of the mini-batch run?", + "options": [ + "Mini-batch noise can act as an implicit regularizer and help optimization settle in flatter regions", + "Full-batch gradients are mathematically incorrect", + "Mini-batches remove the need for learning-rate tuning", + "Validation accuracy is unrelated to the optimization path" + ], + "correctAnswer": 0, + "explanation": "Stochasticity from mini-batches can bias training toward flatter minima that often generalize better. That does not make full-batch gradients wrong; it means optimization dynamics affect generalization.", + "tags": [ + "implicit-regularization", + "optimization", + "flat-minima" + ], + "references": [ + "Gradient Descent", + "Generalization" + ] + }, + { + "id": "ai-q19", + "type": "single-choice", + "difficulty": "hard", + "scenario": "An engineer adds an auxiliary branch for interpretability and accidentally calls detach() on the shared representation before the main loss is computed. Upper layers still learn, but the encoder barely changes.", + "prompt": "Why does the encoder stop improving?", + "options": [ + "detach() stopped gradients from backpropagating into the shared encoder", + "detach() doubled the effective learning rate", + "detach() only affects inference speed, not learning", + "detach() converted the objective into cross-entropy" + ], + "correctAnswer": 0, + "explanation": "Calling detach() breaks the computation graph at that point. 
The loss can no longer send gradients into the encoder, so those parameters stop receiving meaningful updates.", + "tags": [ + "autograd", + "gradient-flow", + "debugging" + ], + "references": [ + "Backpropagation" + ] + }, + { + "id": "ai-q20", + "type": "ordering", + "difficulty": "hard", + "scenario": "You are explaining one training iteration of a feedforward network to a new teammate who keeps mixing up prediction, error calculation, and parameter updates.", + "prompt": "Order the operations for one supervised training step from prediction to parameter update.", + "items": [ + "Compute the forward activations", + "Evaluate the loss against the target", + "Backpropagate gradients through the network", + "Update parameters with the optimizer" + ], + "correctAnswer": [ + "Compute the forward activations", + "Evaluate the loss against the target", + "Backpropagate gradients through the network", + "Update parameters with the optimizer" + ], + "explanation": "A training step first produces predictions, then measures error, then propagates gradients backward, and only then changes parameters. 
Reversing any of those dependencies breaks the logic of supervised learning.", + "tags": [ + "training-step", + "ordering", + "deep-learning-basics" + ], + "references": [ + "Neural Networks", + "Loss Functions", + "Backpropagation", + "Gradient Descent" + ] + }, + { + "id": "ai-q21", + "type": "matching", + "difficulty": "hard", + "scenario": "A platform team is standardizing learning objectives across products and wants each task matched to the right loss family.", + "prompt": "Match each modeling situation to the most suitable loss.", + "premises": [ + "Binary fraud detection with calibrated probabilities", + "Predicting a continuous house price", + "Multiclass document labeling with exactly one correct class", + "Learning a search model where relevant items should outrank irrelevant ones" + ], + "responses": [ + "Binary cross-entropy / log loss", + "Mean squared error", + "Softmax cross-entropy", + "Pairwise ranking loss" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Binary probabilistic classification maps naturally to binary cross-entropy, regression to mean squared error, multiclass single-label classification to softmax cross-entropy, and ranking tasks to pairwise ranking losses.", + "tags": [ + "objective-selection", + "matching", + "loss-functions" + ], + "references": [ + "Loss Functions" + ] + }, + { + "id": "ai-q22", + "type": "single-choice", + "difficulty": "hard", + "scenario": "A logistics ETA model uses only raw latitude and longitude differences between stops. 
It consistently misses traffic behavior around city centers during rush hour.", + "prompt": "Which engineered feature is most likely to help a simpler model capture the missing pattern?", + "options": [ + "An interaction between location region and time-of-day bucket", + "Removing geographic features entirely", + "Replacing labels with route IDs", + "Increasing batch size" + ], + "correctAnswer": 0, + "explanation": "The missed structure is contextual: location alone is insufficient, and time alone is insufficient. An interaction between region and time-of-day lets a simple model express location-specific rush-hour effects.", + "tags": [ + "feature-design", + "eta-prediction", + "contextual-features" + ], + "references": [ + "Feature Engineering" + ] + }, + { + "id": "ai-q23", + "type": "true-false", + "difficulty": "hard", + "scenario": "A language classifier has poor training accuracy and equally poor validation accuracy. A teammate suggests stronger dropout and weight decay as the first fix.", + "prompt": "True or false: Stronger regularization is the best first move because the model is obviously overfitting.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "When both training and validation performance are poor, the model is more likely underfitting or the features are weak. Stronger regularization usually makes that situation worse, not better.", + "tags": [ + "underfitting", + "regularization", + "diagnosis" + ], + "references": [ + "Generalization", + "Neural Networks" + ] + }, + { + "id": "ai-q24", + "type": "multi-select", + "difficulty": "hard", + "scenario": "A recommendation model uses only request-time features offline, but online predictions are noticeably worse. 
Investigation reveals several differences between training and serving.", + "prompt": "Which findings are genuine examples of training-inference skew?", + "options": [ + "The serving pipeline uses stale category encodings from last month", + "The online system imputes missing prices with 0, while training used median imputation", + "Validation loss was computed once per epoch", + "Training normalized a feature with training-set statistics, but serving skips normalization", + "The deployed weights were quantized only after acceptable offline equivalence tests" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Skew occurs when the model sees different feature representations at serving time than it saw during training. Stale encodings, different imputation, and missing normalization all qualify; evaluation cadence and validated quantization do not.", + "tags": [ + "serving-skew", + "feature-pipeline", + "mlops" + ], + "references": [ + "Training vs Inference", + "ML Lifecycle" + ] + }, + { + "id": "ai-q25", + "type": "single-choice", + "difficulty": "hard", + "scenario": "In a sentence-embedding system, almost all cosine similarities fall between 0.82 and 0.90, making nearest-neighbor rankings unstable. The team suspects anisotropy in the embedding space.", + "prompt": "Which remedy is most aligned with that diagnosis?", + "options": [ + "Apply centering or whitening, or retrain with a contrastive objective that spreads representations apart", + "Switch from cosine similarity to alphabetical sorting", + "Increase prompt length during generation", + "Use mean squared error on document IDs" + ], + "correctAnswer": 0, + "explanation": "Anisotropic embeddings cluster too tightly, so many candidates look similarly close. 
Techniques that re-spread the space or retrain the encoder with a discriminative similarity objective address the actual geometry problem.", + "tags": [ + "anisotropy", + "embedding-space", + "retrieval-debugging" + ], + "references": [ + "Word Embeddings" + ] + }, + { + "id": "ai-q26", + "type": "single-choice", + "difficulty": "hard", + "scenario": "A classifier is accurate overall but overconfident, which causes bad threshold decisions in a downstream triage system.", + "prompt": "Which training change most directly targets overconfidence while preserving classification learning?", + "options": [ + "Apply label smoothing during training", + "Increase the number of classes", + "Shuffle the labels once per epoch", + "Optimize accuracy directly and stop computing loss" + ], + "correctAnswer": 0, + "explanation": "Label smoothing discourages extreme probability assignments and often improves calibration while still training the classifier on the same task. The other options either change nothing useful or destroy the learning signal.", + "tags": [ + "calibration", + "label-smoothing", + "classification" + ], + "references": [ + "Loss Functions", + "Generalization" + ] + }, + { + "id": "ai-q27", + "type": "true-false", + "difficulty": "hard", + "scenario": "A retrieval team replaced 384-dimensional embeddings with 3072-dimensional embeddings and saw no improvement because domain labels are scarce and the index grew noisier.", + "prompt": "True or false: Higher-dimensional embeddings necessarily improve retrieval quality if the encoder architecture is unchanged.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "Embedding quality depends on representation learning and domain fit, not just dimensionality. 
Larger vectors can add cost and noise without improving semantic structure.", + "tags": [ + "embedding-dimension", + "retrieval-quality", + "representation-learning" + ], + "references": [ + "Word Embeddings" + ] + }, + { + "id": "ai-q28", + "type": "single-choice", + "difficulty": "hard", + "scenario": "A RAG assistant must answer under 700 ms p95. The current pipeline embeds the query, retrieves 40 chunks, reranks all 40 with a heavy cross-encoder, and then generates a response. Answer quality is good, but latency misses the budget.", + "prompt": "Which change is most likely to preserve quality while cutting latency?", + "options": [ + "Reduce first-stage retrieval breadth moderately and rerank only a smaller candidate set", + "Increase generator temperature so fewer tokens are needed", + "Remove the vector index and scan all documents", + "Replace dense embeddings with one-hot vectors" + ], + "correctAnswer": 0, + "explanation": "The expensive step is reranking too many candidates. A narrower candidate set usually preserves most of the quality while sharply reducing inference cost.", + "tags": [ + "latency", + "reranking", + "rag-optimization" + ], + "references": [ + "RAG Pipeline", + "Training vs Inference" + ] + }, + { + "id": "ai-q29", + "type": "multi-select", + "difficulty": "hard", + "scenario": "After a model refactor, validation accuracy collapses. 
You suspect a gradient bug rather than bad data.", + "prompt": "Which checks are appropriate for debugging backpropagation?", + "options": [ + "Numerically compare analytical gradients to finite-difference estimates on a tiny network", + "Verify that parameters meant to train actually receive nonzero gradients", + "Randomly reorder class labels each batch to see whether learning resumes", + "Inspect activation distributions for saturation or dead units", + "Skip the loss computation and optimize logits directly" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Gradient checking, verifying gradient flow, and inspecting activations are all standard ways to diagnose backprop failures. Random label scrambling and removing the loss break the problem rather than debug it.", + "tags": [ + "gradient-checking", + "backprop-debugging", + "neural-networks" + ], + "references": [ + "Backpropagation", + "Neural Networks" + ] + }, + { + "id": "ai-q30", + "type": "single-choice", + "difficulty": "hard", + "scenario": "You have 18,000 medical images from 600 patients, with multiple scans per patient. Reported leaderboard gains disappeared in clinical pilots because the original train and validation sets shared patients.", + "prompt": "Which evaluation strategy is most defensible now?", + "options": [ + "Use patient-grouped holdout or cross-validation so no patient appears in both splits", + "Use a random image-level split because it maximizes sample count", + "Use training accuracy because labels are expensive", + "Tune repeatedly on the test set for stability" + ], + "correctAnswer": 0, + "explanation": "When multiple samples come from the same entity, the split must respect that grouping. 
Otherwise the model can exploit patient-specific signatures and validation performance becomes unrealistically optimistic.", + "tags": [ + "grouped-splits", + "medical-imaging", + "evaluation-leakage" + ], + "references": [ + "ML Lifecycle", + "Generalization" + ] + }, + { + "id": "ai-q31", + "type": "single-choice", + "difficulty": "hard", + "scenario": "An executive asks whether an internal assistant that uses retrieval plus a generator counts as AI, since part of the system looks like search rather than pure end-to-end supervised classification.", + "prompt": "Which statement is the most technically accurate response for the design review?", + "options": [ + "It is not AI unless every component is a neural network trained end to end", + "It is an AI system because it combines learned representations and probabilistic generation to perform tasks that normally require human intelligence", + "It is just a database query engine because retrieval is involved", + "It is equivalent to a standard supervised classifier" + ], + "correctAnswer": 1, + "explanation": "AI is broader than a single model family. 
A system that combines retrieval, learned representations, and generation to answer questions or reason over information is still an AI system even if it is not trained end to end.", + "tags": [ + "ai-definition", + "system-design", + "rag" + ], + "references": [ + "Introduction", + "RAG Pipeline" + ] + } + ] +} \ No newline at end of file diff --git a/quiz-banks/auth.quiz.json b/quiz-banks/auth.quiz.json new file mode 100644 index 0000000..a5cdde6 --- /dev/null +++ b/quiz-banks/auth.quiz.json @@ -0,0 +1,861 @@ +{ + "moduleId": "auth", + "moduleTitle": "Authentication & Authorization", + "description": "Scenario-based assessment focused on advanced authentication and authorization architecture, identity evolution, federation protocols, PKCE mechanics, BFF security, tenant isolation, and AI agent access control.", + "version": 1, + "timeLimitMinutes": 10, + "questionsPerAttempt": 5, + "questions": [ + { + "id": "auth-q01", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A consumer bank is retiring passwords after a credential-dump incident and is moving premium customers to passkey-based sign-in.", + "prompt": "Which WebAuthn property most directly prevents the stolen database material from being replayed as a login secret against the new portal?", + "options": [ + "The server stores only a public key while the private key remains on the user's authenticator", + "The server stores a reversible biometric template and compares it on every login", + "The browser reuses one signed challenge across every relying party", + "The relying party can reconstruct the private key from the attestation certificate" + ], + "correctAnswer": 0, + "explanation": "With WebAuthn and passkeys, the relying party stores a public key rather than a reusable shared secret. 
The private key never leaves the authenticator, so a database dump does not provide a credential the attacker can replay.", + "tags": [ + "passkeys", + "webauthn", + "public-key-crypto", + "phishing-resistance" + ], + "references": [ + "Authentication Types", + "Introduction" + ] + }, + { + "id": "auth-q02", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A security team is deprecating legacy MFA methods for administrators after several adversary-in-the-middle phishing simulations succeeded.", + "prompt": "Which existing factors are still considered weak or phishable enough to prioritize for retirement?", + "options": [ + "SMS one-time passwords sent to a phone number", + "TOTP codes from an authenticator app", + "Push approval prompts without device-bound cryptographic proof", + "Hardware FIDO2 security keys", + "Synced passkeys unlocked locally with biometrics" + ], + "correctAnswer": [ + 0, + 1, + 2 + ], + "explanation": "SMS, TOTP, and generic push approvals remain vulnerable to SIM-swapping, real-time proxy phishing, or MFA fatigue. Hardware FIDO keys and passkeys are the stronger, phishing-resistant direction because they rely on asymmetric cryptography tied to the origin.", + "tags": [ + "mfa", + "totp", + "sms", + "push-fatigue", + "fido2" + ], + "references": [ + "Authentication Types", + "Evolution of Digital Identity" + ] + }, + { + "id": "auth-q03", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A product manager claims that passkeys are just another way of sending a secret back to the server during login.", + "prompt": "True or false: during a passkey authentication ceremony, the application server receives a reusable shared secret that can be stolen and replayed later.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "Passkeys use a challenge-response ceremony based on public-key cryptography. 
The server verifies a signature with the stored public key, but it does not receive a reusable shared secret.", + "tags": [ + "passkeys", + "challenge-response", + "shared-secrets", + "authentication" + ], + "references": [ + "Authentication Types", + "Introduction" + ] + }, + { + "id": "auth-q04", + "type": "ordering", + "difficulty": "expert", + "scenario": "You are preparing a briefing for new security engineers on how digital identity controls evolved from mainframes to cloud-native systems.", + "prompt": "Arrange these eras from earliest to latest.", + "items": [ + "Simple passwords and access control lists on shared mainframes", + "Dynamic passwords and formal role separation in growing networks", + "Public key infrastructure and attribute-aware policy concepts", + "MFA and single sign-on with centralized identity providers", + "Passkeys and Zero Trust continuous authorization" + ], + "correctAnswer": [ + "Simple passwords and access control lists on shared mainframes", + "Dynamic passwords and formal role separation in growing networks", + "Public key infrastructure and attribute-aware policy concepts", + "MFA and single sign-on with centralized identity providers", + "Passkeys and Zero Trust continuous authorization" + ], + "explanation": "Identity controls moved from simple shared secrets and ACLs toward stronger cryptography, centralized federation, and finally passwordless and continuously evaluated trust models.", + "tags": [ + "history", + "digital-identity", + "zero-trust", + "evolution" + ], + "references": [ + "Evolution of Digital Identity", + "Introduction" + ] + }, + { + "id": "auth-q05", + "type": "matching", + "difficulty": "expert", + "scenario": "An architecture review compares several authentication methods and wants each one tied to the mechanism that best explains its risk profile.", + "prompt": "Match each method to its most accurate underlying mechanism.", + "premises": [ + "A legacy intranet still relies on a memorized credential stored 
as a verifier", + "A bank sends a six-digit code to a phone during sign-in", + "An administrator inserts a physical security key and touches it to approve login", + "A consumer signs in with a synced passkey unlocked by Face ID" + ], + "responses": [ + "Shared-secret authentication that remains vulnerable to reuse and phishing", + "Out-of-band one-time code that is still exposed to SIM-swapping or real-time proxy attacks", + "Origin-bound asymmetric cryptography requiring physical possession", + "Public-key credential that can sync across devices while staying resistant to phishing" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Passwords remain shared secrets, SMS codes are still interceptable or proxyable, hardware keys use possession plus asymmetric cryptography, and passkeys preserve the public-key model while improving usability through device sync.", + "tags": [ + "passwords", + "sms-otp", + "hardware-keys", + "passkeys" + ], + "references": [ + "Authentication Types", + "Evolution of Digital Identity" + ] + }, + { + "id": "auth-q06", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A SaaS HR platform now has hundreds of narrowly tailored roles, and auditors can no longer explain why contractors retained payroll-edit permissions after changing departments.", + "prompt": "Which diagnosis best explains the core authorization failure?", + "options": [ + "Role explosion has made RBAC brittle, overlapping, and hard to audit", + "PKCE verifier leakage is causing privilege escalation across departments", + "ReBAC cannot model reporting hierarchies in enterprise systems", + "WebAuthn is incompatible with HR applications that use approval workflows" + ], + "correctAnswer": 0, + "explanation": "This is the classic RBAC scaling failure known as role explosion. 
When too many exception roles accumulate, users often inherit overlapping permissions that become difficult to review and revoke cleanly.", + "tags": [ + "rbac", + "role-explosion", + "auditing", + "enterprise-security" + ], + "references": [ + "Authorization Models", + "Evolution of Digital Identity" + ] + }, + { + "id": "auth-q07", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A payroll export policy says finance staff may read classified reports only from managed devices during business hours and only when they request the export action.", + "prompt": "Which inputs belong in an ABAC decision for that policy?", + "options": [ + "The user's department claim", + "The report's classification level", + "The requested action such as read or export", + "Environmental context such as time and managed-device status", + "The color of the employee's laptop sticker" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "ABAC evaluates user, resource, action, and environmental attributes. Irrelevant trivia that is not part of policy logic should not influence the decision engine.", + "tags": [ + "abac", + "policy-engine", + "context-aware-access", + "attributes" + ], + "references": [ + "Authorization Models" + ] + }, + { + "id": "auth-q08", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A collaboration tool wants a director to automatically see documents created by direct reports without manually assigning an exception role every time the org chart changes.", + "prompt": "Which authorization model fits that requirement best?", + "options": [ + "Relationship-Based Access Control", + "Role-Based Access Control", + "Attribute-Based Access Control", + "Access Control Lists managed per document owner" + ], + "correctAnswer": 0, + "explanation": "The access rule depends on the relationship between entities, not just a static role or a simple attribute set. 
ReBAC is designed for exactly these graph-like, relationship-driven permissions.", + "tags": [ + "rebac", + "hierarchies", + "document-access", + "relationships" + ], + "references": [ + "Authorization Models" + ] + }, + { + "id": "auth-q09", + "type": "true-false", + "difficulty": "expert", + "scenario": "A network team argues that requests originating from the corporate office should bypass expensive policy checks because the user already authenticated earlier in the day.", + "prompt": "True or false: in a Zero Trust architecture, internal network location can substitute for per-request authorization once the session is established.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "Zero Trust explicitly rejects the idea of a permanently trusted network zone. Each interaction must be strongly authenticated and authorized using current trust context rather than location alone.", + "tags": [ + "zero-trust", + "continuous-authorization", + "network-security", + "least-privilege" + ], + "references": [ + "Authorization Models", + "Introduction" + ] + }, + { + "id": "auth-q10", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A frontend engineer wants to use an OAuth 2.0 access token as the application's login proof because adding OIDC feels redundant.", + "prompt": "What is the precise architectural objection to that plan?", + "options": [ + "An access token expresses delegated API authority, not a relying-party proof of who the user is", + "OAuth 2.0 cannot issue any tokens over HTTPS", + "Access tokens are always XML assertions designed only for SAML bridges", + "OAuth 2.0 requires hardware keys for every confidential client" + ], + "correctAnswer": 0, + "explanation": "OAuth 2.0 is primarily about delegated authorization. 
OIDC adds standardized identity semantics, especially the ID token, so the relying party can verify who authenticated.", + "tags": [ + "oauth2", + "oidc", + "identity", + "authorization" + ], + "references": [ + "OAuth 2.0 & OpenID Connect", + "Introduction" + ] + }, + { + "id": "auth-q11", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A mobile team is adding social sign-in and wants to understand which capabilities make the solution OpenID Connect instead of plain OAuth 2.0.", + "prompt": "Which additions are characteristic of OIDC?", + "options": [ + "Issuing a signed ID token that carries identity claims", + "Defining a standard /userinfo endpoint", + "Using the openid scope and standardized claims model", + "Replacing HTTPS bearer tokens with custom per-request signatures", + "Eliminating access tokens entirely from the flow" + ], + "correctAnswer": [ + 0, + 1, + 2 + ], + "explanation": "OIDC layers identity on top of OAuth 2.0 by standardizing identity claims, the ID token, and related endpoints. 
It does not eliminate access tokens or replace transport security with bespoke signature schemes.", + "tags": [ + "oidc", + "id-token", + "userinfo", + "claims" + ], + "references": [ + "OAuth 2.0 & OpenID Connect" + ] + }, + { + "id": "auth-q12", + "type": "ordering", + "difficulty": "expert", + "scenario": "You are walking a team through the correct Authorization Code Flow with PKCE for a public client that cannot safely embed a client secret.", + "prompt": "Arrange the PKCE flow from the first client action to token issuance.", + "items": [ + "The client generates a code_verifier and derives a code_challenge", + "The client redirects the user to the authorization endpoint with the code_challenge", + "The user authenticates and consents at the identity provider", + "The identity provider redirects back with a short-lived authorization code", + "The client sends the authorization code and original code_verifier to the token endpoint", + "The identity provider validates the verifier against the stored challenge and returns tokens" + ], + "correctAnswer": [ + "The client generates a code_verifier and derives a code_challenge", + "The client redirects the user to the authorization endpoint with the code_challenge", + "The user authenticates and consents at the identity provider", + "The identity provider redirects back with a short-lived authorization code", + "The client sends the authorization code and original code_verifier to the token endpoint", + "The identity provider validates the verifier against the stored challenge and returns tokens" + ], + "explanation": "PKCE works because the code challenge is established before the redirect, and the original verifier is presented only during the token exchange. 
Token issuance must wait until the identity provider confirms that mathematical relationship.", + "tags": [ + "pkce", + "authorization-code-flow", + "public-clients", + "token-exchange" + ], + "references": [ + "PKCE Authorization Flow", + "OAuth 2.0 & OpenID Connect" + ] + }, + { + "id": "auth-q13", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A malicious app on the same device intercepts the redirect URI and steals the authorization code returned to a mobile client.", + "prompt": "Why does the attack still fail if PKCE is implemented correctly?", + "options": [ + "The intercepted code can be redeemed only by a client that proves possession of the original code_verifier", + "The authorization code already contains the user's password hash", + "The redirect URI alone is enough to authenticate the client without any additional proof", + "The identity provider embeds the access token directly in the browser history entry" + ], + "correctAnswer": 0, + "explanation": "PKCE neutralizes code interception because the code alone is insufficient. 
The attacker also needs the original verifier whose hash matches the previously stored challenge.", + "tags": [ + "pkce", + "code-interception", + "mobile-security", + "oauth2" + ], + "references": [ + "PKCE Authorization Flow" + ] + }, + { + "id": "auth-q14", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A SPA security review rejects localStorage token handling and asks whether a Backend for Frontend redesign would materially improve the threat model.", + "prompt": "Which properties are true if the BFF pattern is implemented correctly?", + "options": [ + "OAuth and OIDC tokens remain on the server side rather than inside browser storage", + "The browser receives a session cookie that can be marked HttpOnly, Secure, and SameSite", + "The BFF can inject the access token when proxying requests to downstream APIs", + "The SPA must still read the raw access token from localStorage for every protected API call", + "The pattern reduces token theft from XSS because injected scripts cannot read HttpOnly cookies or server-side token caches" + ], + "correctAnswer": [ + 0, + 1, + 2, + 4 + ], + "explanation": "The BFF shifts bearer-token handling away from the browser and into a confidential backend. 
The browser works with a hardened session cookie instead, which substantially lowers token-exfiltration risk from XSS.", + "tags": [ + "bff", + "xss", + "session-cookies", + "token-handling" + ], + "references": [ + "BFF Pattern", + "OAuth 2.0 & OpenID Connect" + ] + }, + { + "id": "auth-q15", + "type": "matching", + "difficulty": "expert", + "scenario": "An engineering manager wants every actor in the BFF architecture mapped to its exact responsibility before approving the redesign.", + "prompt": "Match each actor to its primary responsibility.", + "premises": [ + "SPA browser", + "BFF service", + "Identity provider", + "Protected microservice API" + ], + "responses": [ + "Sends application requests while automatically attaching the session cookie", + "Performs the confidential OIDC flow, caches tokens, and proxies outbound API calls", + "Authenticates the user and issues tokens to the confidential client", + "Validates the bearer token presented by the BFF and returns protected data" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "The browser never handles the raw bearer token in a proper BFF model. 
The BFF owns the confidential client behavior, the identity provider authenticates and issues tokens, and the protected API validates the token on each request.", + "tags": [ + "bff", + "oidc", + "sequence-flow", + "responsibilities" + ], + "references": [ + "BFF Pattern", + "Visualization" + ] + }, + { + "id": "auth-q16", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A public SPA loads third-party analytics scripts, and the security team wants bearer tokens kept completely out of reach of any injected browser JavaScript.", + "prompt": "Which architecture most directly satisfies that requirement without abandoning OIDC?", + "options": [ + "Introduce a Backend for Frontend that owns the tokens and uses a secure session cookie with the browser", + "Keep the SPA as a public client and store the access token in localStorage with a short expiration", + "Use the Implicit Flow so the browser receives the token earlier in the redirect", + "Move the access token into sessionStorage and rely on CSP alone" + ], + "correctAnswer": 0, + "explanation": "The BFF pattern is specifically designed to remove raw tokens from browser storage and keep them server-side. 
That changes the exposure model in a way localStorage or sessionStorage cannot.", + "tags": [ + "bff", + "spa-security", + "token-exposure", + "oidc" + ], + "references": [ + "BFF Pattern", + "OAuth 2.0 & OpenID Connect" + ] + }, + { + "id": "auth-q17", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A multi-tenant billing API returned Tenant B invoices after a Tenant A user manipulated a path parameter, even though the user had authenticated successfully.", + "prompt": "Which control should have been mandatory before the microservice read any invoice data?", + "options": [ + "Enforce the tenant claim from the JWT against the requested resource context on every request", + "Trust the tenant selected in the URL if the session cookie is valid", + "Hide other tenants from the frontend navigation and skip server-side checks", + "Issue one shared administrative token to the gateway and let services infer tenant after the query" + ], + "correctAnswer": 0, + "explanation": "Successful authentication does not imply correct tenant authorization. 
The service must validate that the token's tenant context matches the resource being accessed before any data is returned.", + "tags": [ + "multi-tenant", + "tenant-isolation", + "jwt-claims", + "authorization" + ], + "references": [ + "Authorization Models", + "OAuth 2.0 & OpenID Connect" + ] + }, + { + "id": "auth-q18", + "type": "multi-select", + "difficulty": "expert", + "scenario": "An AI assistant needs to summarize a signed-in executive's private messages without receiving broad, long-lived permissions across the whole organization.", + "prompt": "Which statements describe the correct token-exchange architecture for that agent?", + "options": [ + "The human user first authenticates through OIDC so there is a verifiable subject context", + "The AI agent authenticates itself when requesting the exchanged token", + "The authorization server mints a short-lived token with tightly constrained scope for the requested task", + "Client Credentials alone preserve the end user's identity across trust domains", + "The exchanged token should usually be broader than the user's original permission set to avoid agent retries" + ], + "correctAnswer": [ + 0, + 1, + 2 + ], + "explanation": "For an agent acting on behalf of a user, token exchange preserves human identity context while still authenticating the machine actor. 
The resulting token should be narrow, task-specific, and short-lived rather than broad or user-agnostic.", + "tags": [ + "ai-agents", + "token-exchange", + "least-privilege", + "delegation" + ], + "references": [ + "AI Agent Authentication", + "OAuth 2.0 & OpenID Connect" + ] + }, + { + "id": "auth-q19", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A background reconciliation bot runs overnight and obtains its access token through the Client Credentials grant.", + "prompt": "True or false: that token should be interpreted as representing the bot's own identity rather than a delegated human user's identity.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "Client Credentials identifies the client application or service itself. It does not preserve a human subject unless a separate delegation or token-exchange mechanism is involved.", + "tags": [ + "client-credentials", + "service-identity", + "machine-to-machine", + "oauth2" + ], + "references": [ + "AI Agent Authentication" + ] + }, + { + "id": "auth-q20", + "type": "matching", + "difficulty": "expert", + "scenario": "A CISO wants each access-control approach mapped to the type of policy problem it handles best before approving a platform-wide redesign.", + "prompt": "Match each scenario to the authorization model that fits it best.", + "premises": [ + "Finance staff may export reports only during business hours from managed devices", + "Every warehouse picker needs the same handheld-scanner permissions", + "A director can access documents owned by their direct reports", + "Every request to an internal admin console is re-evaluated using device posture and least privilege" + ], + "responses": [ + "ABAC", + "RBAC", + "ReBAC", + "Zero Trust" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "ABAC fits rich context-driven policies, RBAC works well for stable job functions, ReBAC models relationship-driven access, and Zero Trust requires ongoing 
verification and authorization for each interaction.", + "tags": [ + "authorization-models", + "abac", + "rbac", + "rebac", + "zero-trust" + ], + "references": [ + "Authorization Models" + ] + }, + { + "id": "auth-q21", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A team is building a modern mobile app and browser-based SPA and wants federated sign-in with developer-friendly JSON-based identity semantics.", + "prompt": "Which protocol is the best fit for that requirement?", + "options": [ + "OpenID Connect", + "SAML 2.0", + "OpenConnect", + "OAuth 1.0" + ], + "correctAnswer": 0, + "explanation": "OIDC is the modern identity layer built on OAuth 2.0 and is well suited to web, mobile, and SPA clients. SAML remains common in legacy enterprise SSO, but its XML-heavy model is less natural for modern app clients.", + "tags": [ + "oidc", + "saml", + "spa", + "mobile-auth" + ], + "references": [ + "OAuth 2.0 & OpenID Connect", + "Introduction" + ] + }, + { + "id": "auth-q22", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A network engineer says the same protocol stack can solve both remote VPN connectivity and browser-based identity federation because 'OpenConnect and OpenID Connect are basically the same thing.'", + "prompt": "Which correction is accurate?", + "options": [ + "OpenConnect is a VPN protocol suite, while OpenID Connect is an application-layer identity protocol", + "OpenConnect is the mobile version of OIDC and differs only in token format", + "OpenID Connect is used only for network tunneling, not user authentication", + "Both terms describe the same protocol, but one is the legacy name" + ], + "correctAnswer": 0, + "explanation": "OpenConnect is used for secure network tunneling and VPN interoperability. 
OpenID Connect is an identity layer for authentication built on top of OAuth 2.0.", + "tags": [ + "openconnect", + "openid-connect", + "protocols", + "federation" + ], + "references": [ + "OAuth 2.0 & OpenID Connect", + "Introduction" + ] + }, + { + "id": "auth-q23", + "type": "multi-select", + "difficulty": "expert", + "scenario": "An enterprise wants phishing-resistant admin sign-in for privileged operators and is choosing which factors should be approved as the strategic standard.", + "prompt": "Which options align with that phishing-resistant goal?", + "options": [ + "FIDO2 hardware security keys", + "WebAuthn passkeys", + "SMS one-time passwords", + "TOTP authenticator app codes", + "Password plus security question" + ], + "correctAnswer": [ + 0, + 1 + ], + "explanation": "Hardware FIDO keys and passkeys rely on asymmetric cryptography and origin binding, which makes them far more resistant to phishing than code-based or knowledge-based factors.", + "tags": [ + "phishing-resistance", + "fido2", + "passkeys", + "admin-security" + ], + "references": [ + "Authentication Types", + "Evolution of Digital Identity" + ] + }, + { + "id": "auth-q24", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A BFF implementation is ready for production, but the security review finds the browser session cookie is missing important hardening attributes.", + "prompt": "Which cookie attribute set best matches the guidance for a secure BFF session?", + "options": [ + "HttpOnly, Secure, and SameSite", + "Public, Cacheable, and Path=/", + "HttpOnly only", + "Base64-encoded in localStorage" + ], + "correctAnswer": 0, + "explanation": "A BFF session cookie should be protected from client-side JavaScript, restricted to secure transport, and hardened against cross-site misuse. 
HttpOnly, Secure, and SameSite are the essential baseline attributes described for that pattern.", + "tags": [ + "cookies", + "bff", + "session-security", + "browser-hardening" + ], + "references": [ + "BFF Pattern" + ] + }, + { + "id": "auth-q25", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A relying party needs standardized claims such as subject identifier and authentication time so it can establish a user's signed-in identity without inventing a custom profile contract.", + "prompt": "Which artifact should the client validate or consume for those standardized identity claims?", + "options": [ + "The access token", + "The ID token", + "The code_verifier", + "The SAML metadata document" + ], + "correctAnswer": 1, + "explanation": "OIDC defines the ID token specifically for verifiable identity claims. Access tokens are for resource authorization, while the code_verifier is a PKCE proof artifact rather than an identity container.", + "tags": [ + "id-token", + "claims", + "oidc", + "identity-proofs" + ], + "references": [ + "OAuth 2.0 & OpenID Connect" + ] + }, + { + "id": "auth-q26", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A native mobile app cannot safely embed a static client secret because the binary can be inspected or decompiled by adversaries.", + "prompt": "Which flow is the correct modern choice for that client?", + "options": [ + "Authorization Code Flow with PKCE", + "Client Credentials", + "Implicit Flow", + "Resource Owner Password Credentials" + ], + "correctAnswer": 0, + "explanation": "Public clients such as native mobile apps should use the Authorization Code Flow with PKCE because PKCE provides proof-of-possession for the token exchange without relying on a static client secret.", + "tags": [ + "pkce", + "native-apps", + "public-clients", + "oauth2" + ], + "references": [ + "PKCE Authorization Flow", + "OAuth 2.0 & OpenID Connect" + ] + }, + { + "id": "auth-q27", + "type": "multi-select", + 
"difficulty": "expert", + "scenario": "A company is redesigning internal admin APIs around Zero Trust after discovering that a flat VPN was being treated as proof of safety.", + "prompt": "Which controls are consistent with a Zero Trust migration?", + "options": [ + "Applying least-privilege permissions to every service interaction", + "Using micro-segmentation or software-defined perimeters to narrow lateral movement", + "Continuously evaluating device health and other context signals", + "Treating VPN presence as sufficient standing trust for the rest of the day", + "Authenticating and authorizing every access attempt rather than trusting location" + ], + "correctAnswer": [ + 0, + 1, + 2, + 4 + ], + "explanation": "Zero Trust assumes nothing is trusted by default, including internal network position. It relies on least privilege, segmentation, and repeated trust evaluation instead of once-per-day perimeter checks.", + "tags": [ + "zero-trust", + "micro-segmentation", + "device-posture", + "continuous-verification" + ], + "references": [ + "Authorization Models", + "Introduction" + ] + }, + { + "id": "auth-q28", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "An adaptive authentication engine must decide whether to step up verification on a high-value login attempt.", + "prompt": "Which signal most strongly supports a risk-based step-up decision?", + "options": [ + "The request comes from a familiar device fingerprint and expected IP range", + "The login appears to involve impossible travel and a previously unseen device fingerprint", + "The user logs in at the same time every Monday morning", + "The browser window is larger than usual on the login page" + ], + "correctAnswer": 1, + "explanation": "Adaptive authentication evaluates contextual anomalies such as suspicious IP changes and unfamiliar devices. 
Impossible travel and a new fingerprint are strong indicators that additional verification may be warranted.", + "tags": [ + "adaptive-auth", + "risk-based-auth", + "device-fingerprint", + "context-signals" + ], + "references": [ + "Authentication Types", + "Evolution of Digital Identity" + ] + }, + { + "id": "auth-q29", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A shared SaaS platform routes employees from different customer tenants to distinct corporate identity providers before issuing tokens to the application.", + "prompt": "Which architectural component is doing the key federation work here?", + "options": [ + "An identity broker that selects an external identity provider based on tenant context", + "A Backend for Frontend that stores browser session cookies", + "A hardware authenticator attestation service for passkey enrollment", + "A local TOTP seed generator inside the SPA" + ], + "correctAnswer": 0, + "explanation": "In multi-tenant federation, an identity broker can direct authentication to different external identity providers based on tenant context while preserving a unified application experience.", + "tags": [ + "identity-broker", + "multi-tenant", + "federation", + "sso" + ], + "references": [ + "OAuth 2.0 & OpenID Connect", + "Authorization Models" + ] + }, + { + "id": "auth-q30", + "type": "single-choice", + "difficulty": "expert", + "scenario": "An engineer drafts a Mermaid diagram for PKCE but shows the identity provider returning tokens immediately after the authorization code arrives, without any verifier validation.", + "prompt": "Which missing step must be inserted before token issuance for the sequence to be accurate?", + "options": [ + "The identity provider hashes the received code_verifier and compares it to the stored code_challenge", + "The browser writes the access token to localStorage so the frontend can replay it", + "The user signs the access token with their biometric template", + "The API gateway 
exchanges the code for a SAML assertion" + ], + "correctAnswer": 0, + "explanation": "A correct PKCE sequence must show the verifier check before token issuance. Without that comparison, the diagram omits the very control that protects public clients from intercepted authorization codes.", + "tags": [ + "visualization", + "pkce", + "sequence-diagram", + "oauth2" + ], + "references": [ + "Visualization", + "PKCE Authorization Flow" + ] + }, + { + "id": "auth-q31", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A company must support a browser SPA, a native mobile app, and an internal AI assistant that acts on behalf of signed-in employees against tenant-isolated APIs, all while minimizing token exposure.", + "prompt": "Which combined architecture is the strongest fit for that requirement set?", + "options": [ + "Use a BFF for the SPA, Authorization Code Flow with PKCE for the mobile app, and token exchange for the AI assistant's user-scoped tasks", + "Store one long-lived refresh token in browser localStorage and share it with the mobile app and the AI assistant", + "Use the Implicit Flow for both the SPA and the mobile app, then give the AI assistant a broad Client Credentials token with admin scope", + "Use SAML assertions directly in browser storage for every client and let each downstream API infer tenant ownership" + ], + "correctAnswer": 0, + "explanation": "That combination matches each client type to the right trust model: the SPA avoids browser token exposure through a BFF, the mobile app uses PKCE because it is a public client, and the AI assistant uses narrow delegated tokens instead of broad standing privileges.", + "tags": [ + "architecture", + "bff", + "pkce", + "ai-agents", + "tenant-isolation" + ], + "references": [ + "BFF Pattern", + "PKCE Authorization Flow", + "AI Agent Authentication" + ] + } + ] +} \ No newline at end of file diff --git a/quiz-banks/bigo.quiz.json b/quiz-banks/bigo.quiz.json new file mode 100644 index 
0000000..07c3abb --- /dev/null +++ b/quiz-banks/bigo.quiz.json @@ -0,0 +1,841 @@ +{ + "moduleId": "bigo", + "moduleTitle": "Big-O Notation", + "description": "Scenario-based assessment of asymptotic analysis, growth rates, tight bounds, amortized reasoning, and real-world algorithm tradeoffs.", + "version": 1, + "timeLimitMinutes": 10, + "questionsPerAttempt": 5, + "questions": [ + { + "id": "bigo-q01", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A ranking service is choosing between two implementations for next quarter's traffic spike. One does about 40n log n units of work, while the other does about 0.002n^2.", + "prompt": "Which conclusion is safest for long-term scalability?", + "options": [ + "The 40n log n design is asymptotically preferable even if it is not fastest at today's smaller inputs", + "The 0.002n^2 design will always stay faster because its constant factor is smaller today", + "Both designs are the same asymptotic class because both depend on n", + "Big-O cannot compare these designs because one includes a decimal constant" + ], + "correctAnswer": 0, + "explanation": "n log n grows more slowly than n^2. 
Constants can delay the crossover point, but they do not change the long-run asymptotic ordering.", + "tags": [ + "growth-rates", + "scalability", + "tradeoffs" + ], + "references": [ + "Core Concepts", + "Real-World Applications" + ] + }, + { + "id": "bigo-q02", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A fraud-detection pipeline currently compares every incoming record against every other record to detect duplicates.", + "prompt": "Which proposed changes can reduce the asymptotic time complexity of duplicate detection?", + "options": [ + "Sort the records once by key and then scan adjacent records", + "Keep the nested comparison loops but stop after the first 50 pair checks", + "Track seen keys in a hash set during a single pass", + "Run the quadratic comparison on more worker threads", + "Process the same pairwise comparison in batches of 10 records" + ], + "correctAnswer": [ + 0, + 2 + ], + "explanation": "Sorting followed by one pass changes the work to O(n log n), and a hash-set pass can make it O(n) average. Early stopping, batching, or fixed parallelism do not fundamentally repair quadratic worst-case growth.", + "tags": [ + "hashing", + "sorting", + "optimization", + "real-world" + ], + "references": [ + "Core Concepts", + "Real-World Applications" + ] + }, + { + "id": "bigo-q03", + "type": "true-false", + "difficulty": "expert", + "scenario": "At n = 500, a high-overhead indexed implementation with O(n log n) time beats a simpler O(n) implementation because the linear version performs expensive I/O.", + "prompt": "True or false: because the O(n log n) implementation wins at n = 500, it is asymptotically more efficient than the O(n) implementation.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "Asymptotic efficiency concerns how growth behaves as n increases, not who wins at one measured input size. 
A higher-overhead linear algorithm can lose at small n and still scale better overall.", + "tags": [ + "benchmarking", + "asymptotics", + "tradeoffs" + ], + "references": [ + "Introduction", + "Core Concepts" + ] + }, + { + "id": "bigo-q04", + "type": "ordering", + "difficulty": "expert", + "scenario": "During a security review, you need to justify the complexity of code whose outer loop touches n users and whose inner loop halves a counter each time.", + "prompt": "Arrange the analysis steps from first to last.", + "items": [ + "State the combined bound as O(n log n)", + "Identify that the outer loop contributes a factor of n", + "Determine that the halving inner loop runs O(log n) times", + "Multiply the loop costs because the inner work occurs for each outer iteration", + "Drop constant factors and lower-order terms" + ], + "correctAnswer": [ + "Identify that the outer loop contributes a factor of n", + "Determine that the halving inner loop runs O(log n) times", + "Multiply the loop costs because the inner work occurs for each outer iteration", + "Drop constant factors and lower-order terms", + "State the combined bound as O(n log n)" + ], + "explanation": "You first isolate the cost of each loop, then multiply because the logarithmic inner work happens inside every outer iteration. 
After simplifying constants, the tight upper bound is O(n log n).", + "tags": [ + "nested-loops", + "tight-bounds", + "analysis-workflow" + ], + "references": [ + "Algorithm Analysis", + "Core Concepts" + ] + }, + { + "id": "bigo-q05", + "type": "matching", + "difficulty": "expert", + "scenario": "An architecture review is mapping product tasks to their dominant asymptotic risk before traffic doubles.", + "prompt": "Match each workload to the most appropriate complexity class.", + "premises": [ + "Scan every record in a nightly export once", + "Locate a customer ID in a sorted array using midpoint checks", + "Compare every pair of student submissions for similarity", + "Try every possible subset of 40 optional features" + ], + "responses": [ + "O(n)", + "O(log n)", + "O(n^2)", + "O(2^n)" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "A full pass is linear, binary search is logarithmic, all-pairs comparison is quadratic, and exhaustive subset exploration is exponential.", + "tags": [ + "complexity-classes", + "workloads", + "mapping" + ], + "references": [ + "Common Complexities", + "Real-World Applications" + ] + }, + { + "id": "bigo-q06", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A mobile analytics SDK appends events into a dynamic array and sees rare latency spikes whenever the buffer grows.", + "prompt": "Which characterization of append cost is most accurate over a long sequence of appends?", + "options": [ + "Each append is worst-case O(1)", + "Each append is Theta(log n) because capacity doubles", + "Append is O(1) amortized even though some individual appends are O(n)", + "Append is always O(n) because resizing eventually happens" + ], + "correctAnswer": 2, + "explanation": "Doubling arrays occasionally pay O(n) to copy elements during resize, but those expensive operations are spread across many cheap appends, giving O(1) amortized cost.", + "tags": [ + "amortized-analysis", + "dynamic-arrays", + "resizing" + 
], + "references": [ + "Advanced Concepts", + "Real-World Applications" + ] + }, + { + "id": "bigo-q07", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A data engineering team is reviewing several candidate cost formulas from prototype ETL jobs.", + "prompt": "Which candidate runtimes all simplify to O(n^2)?", + "options": [ + "n^2 + 200n", + "7n log n + n", + "0.5n^2 + n log n", + "n(n - 1)(n - 2)", + "3n^2 + 10" + ], + "correctAnswer": [ + 0, + 2, + 4 + ], + "explanation": "Only expressions with a dominant quadratic term simplify to O(n^2). n(n - 1)(n - 2) is cubic, and 7n log n + n is O(n log n).", + "tags": [ + "dominant-term", + "simplification", + "algebra" + ], + "references": [ + "Common Complexities", + "Algorithm Analysis" + ] + }, + { + "id": "bigo-q08", + "type": "single-choice", + "difficulty": "expert", + "scenario": "An incident review shows a recursive search procedure branching into two calls on n - 1 remaining candidates, with no memoization and only constant extra work per call.", + "prompt": "What is the best worst-case upper bound for the running time?", + "options": [ + "O(n log n)", + "O(n^2)", + "O(2^n)", + "O(n!)" + ], + "correctAnswer": 2, + "explanation": "Branching into two calls of size n - 1 roughly doubles the amount of work at each level, producing exponential growth on the order of O(2^n). O(n!) would require even faster branching growth.", + "tags": [ + "recursion", + "exponential-time", + "search" + ], + "references": [ + "Advanced Concepts", + "Algorithm Analysis" + ] + }, + { + "id": "bigo-q09", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A batch job makes one full pass to validate n rows and a second separate full pass to compute totals.", + "prompt": "True or false: two separate full passes over n records should be classified as O(n^2).", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "Sequential passes add rather than multiply: O(n) + O(n) = O(n). 
Two loops are quadratic only when one is nested inside the other.", + "tags": [ + "sequential-loops", + "addition-rule", + "analysis" + ], + "references": [ + "Introduction", + "Core Concepts" + ] + }, + { + "id": "bigo-q10", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A catalog service already stores products in sorted memory and needs to answer a single lookup by ID with the lowest asymptotic query cost.", + "prompt": "Which asymptotic lookup time is the best fit if the service uses binary search on the existing sorted array?", + "options": [ + "O(1)", + "O(log n)", + "O(n log n)", + "O(sqrt n)" + ], + "correctAnswer": 1, + "explanation": "With the array already sorted, binary search halves the remaining search interval each step, so lookup is O(log n). The sorting cost is separate if it has not already been paid.", + "tags": [ + "binary-search", + "logarithmic-time", + "sorted-data" + ], + "references": [ + "Common Complexities", + "Real-World Applications" + ] + }, + { + "id": "bigo-q11", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A graph analytics service uses BFS on adjacency lists, and a reviewer tries to oversimplify the complexity for documentation.", + "prompt": "Which statements are correct about the asymptotic cost of BFS on an adjacency-list graph?", + "options": [ + "Without extra assumptions, the traversal cost is O(n + m)", + "It is always safe to replace O(n + m) with O(n) because every graph has at most n edges", + "For dense graphs, the same bound can behave like O(n^2)", + "For trees, where m = n - 1, the bound simplifies to O(n)", + "The queue's maximum frontier size never matters for space complexity" + ], + "correctAnswer": [ + 0, + 2, + 3 + ], + "explanation": "BFS on adjacency lists processes each vertex and edge at most once, hence O(n + m). 
For dense graphs, m can be Theta(n^2), while tree inputs let the same expression simplify to O(n).", + "tags": [ + "graphs", + "bfs", + "sparse-vs-dense" + ], + "references": [ + "Algorithm Analysis", + "Real-World Applications" + ] + }, + { + "id": "bigo-q12", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A memory-constrained service traverses a very deep but narrow tree with recursive DFS.", + "prompt": "If the tree degenerates into a chain of n nodes, what is the worst-case auxiliary space from the call stack?", + "options": [ + "O(1)", + "O(log n)", + "O(n)", + "O(n^2)" + ], + "correctAnswer": 2, + "explanation": "Recursive DFS uses stack space proportional to traversal depth. In a chain-shaped tree, the height is n, so auxiliary stack space becomes O(n).", + "tags": [ + "space-complexity", + "dfs", + "recursion" + ], + "references": [ + "Algorithm Analysis", + "Advanced Concepts" + ] + }, + { + "id": "bigo-q13", + "type": "ordering", + "difficulty": "expert", + "scenario": "An autoscaling dashboard ranks candidate algorithms by long-run growth so the team can pick the safest design before launch.", + "prompt": "Order these complexity classes from slowest-growing to fastest-growing.", + "items": [ + "O(n log n)", + "O(1)", + "O(2^n)", + "O(log n)", + "O(n)" + ], + "correctAnswer": [ + "O(1)", + "O(log n)", + "O(n)", + "O(n log n)", + "O(2^n)" + ], + "explanation": "Constant growth is smallest, followed by logarithmic, then linear, then n log n, and finally exponential. 
The ranking reflects asymptotic growth, not current constants.", + "tags": [ + "growth-rates", + "ordering", + "common-complexities" + ], + "references": [ + "Common Complexities", + "Gamification Hub" + ] + }, + { + "id": "bigo-q14", + "type": "matching", + "difficulty": "expert", + "scenario": "A senior engineer is teaching new hires to recognize recurrence patterns quickly during design reviews.", + "prompt": "Match each recurrence-style description to the most appropriate complexity class.", + "premises": [ + "Repeatedly halve the remaining search interval and do constant work per step", + "Split into two halves, solve both, and linearly combine the results", + "Shrink the problem by one element and do constant extra work", + "Branch into two highly overlapping recursive calls without caching" + ], + "responses": [ + "O(log n)", + "O(n log n)", + "O(n)", + "O(2^n)" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Halving gives logarithmic time, divide-and-conquer with linear merge gives n log n, decrement-by-one gives linear time, and uncontrolled repeated branching produces exponential growth.", + "tags": [ + "recurrences", + "divide-and-conquer", + "matching" + ], + "references": [ + "Algorithm Analysis", + "Advanced Concepts" + ] + }, + { + "id": "bigo-q15", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A protocol parser loops over n packets, and for each packet it checks exactly 64 fixed flag bits.", + "prompt": "What is the tight time complexity of the parser?", + "options": [ + "O(1)", + "O(log n)", + "O(n)", + "O(n^2)" + ], + "correctAnswer": 2, + "explanation": "The inner loop is bounded by a fixed constant 64, so the total work is 64n, which simplifies to Theta(n). 
Fixed-width work does not create a second factor of n.", + "tags": [ + "tight-bounds", + "constants", + "protocols" + ], + "references": [ + "Algorithm Analysis", + "Core Concepts" + ] + }, + { + "id": "bigo-q16", + "type": "multi-select", + "difficulty": "expert", + "scenario": "During interview practice, you need to explain why a two-pointer strategy can beat a naive quadratic search on a sorted list.", + "prompt": "Which properties make the classic two-pointer approach run in O(n) time instead of O(n^2)?", + "options": [ + "The input order lets you reason monotonically about whether to move left or right", + "Each pointer only moves inward and never resets", + "Every comparison gives enough information to eliminate one candidate pair direction", + "The method requires hashing every element before the scan begins", + "The method still performs an inner loop across all remaining elements on each step" + ], + "correctAnswer": [ + 0, + 1, + 2 + ], + "explanation": "The O(n) two-pointer method relies on monotonic movement: each comparison discards one direction, and each pointer advances at most n times total. Adding hashing or nested rescans breaks the point of the technique.", + "tags": [ + "two-pointers", + "linear-time", + "practice" + ], + "references": [ + "Practice Challenges", + "Algorithm Analysis" + ] + }, + { + "id": "bigo-q17", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A CDN invalidation service inserts expirations continuously and must repeatedly retrieve the smallest expiry next.", + "prompt": "Which core structure gives the best asymptotic fit for inserts plus repeated extract-min operations?", + "options": [ + "Binary min-heap", + "Resorted dynamic array after every insert", + "Unordered hash map with periodic full scans", + "Linked list kept in insertion order" + ], + "correctAnswer": 0, + "explanation": "A binary min-heap supports O(log n) insert and O(log n) extract-min while keeping O(1) access to the smallest item. 
The other options require repeated resorting or full scans.", + "tags": [ + "heaps", + "priority-queues", + "scheduling" + ], + "references": [ + "Real-World Applications", + "Common Complexities" + ] + }, + { + "id": "bigo-q18", + "type": "true-false", + "difficulty": "expert", + "scenario": "A reviewer proves only that a cleanup algorithm runs in O(n) time and stops there.", + "prompt": "True or false: that upper bound alone is enough to conclude the algorithm is Theta(n).", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "O(n) only states an upper bound. Without a matching lower bound, the algorithm could still be Theta(log n), Theta(1), or some other smaller class that satisfies O(n).", + "tags": [ + "big-theta", + "upper-bounds", + "notation" + ], + "references": [ + "Advanced Concepts", + "Core Concepts" + ] + }, + { + "id": "bigo-q19", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A ranking pipeline first deduplicates n items with a hash set in O(n) average time, then sorts the surviving k items, where k can be as large as n.", + "prompt": "What is the tightest worst-case upper bound in terms of n for the whole pipeline?", + "options": [ + "O(n)", + "O(log n)", + "O(n log n)", + "O(n^2)" + ], + "correctAnswer": 2, + "explanation": "The total work is O(n + k log k). 
In the worst case k = n, so the pipeline simplifies to O(n log n).", + "tags": [ + "pipeline-analysis", + "sorting", + "worst-case" + ], + "references": [ + "Algorithm Analysis", + "Real-World Applications" + ] + }, + { + "id": "bigo-q20", + "type": "matching", + "difficulty": "expert", + "scenario": "A performance review covers four optimization strategies and asks what kind of asymptotic change each one can realistically produce.", + "prompt": "Match each optimization to its most accurate asymptotic effect.", + "premises": [ + "Memoize overlapping states in a Fibonacci-like pricing model", + "Replace repeated linear membership checks with a hash set", + "Split a quadratic image comparison job across 8 workers", + "Sort a static table once and then answer many point lookups with binary search" + ], + "responses": [ + "Can reduce exponential work to linear or polynomial by reusing solved states", + "Often changes repeated membership checks from O(n) to O(1) average", + "Improves wall-clock time through parallelism but does not change the asymptotic class", + "Pays O(n log n) preprocessing to enable O(log n) queries" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Memoization changes how many unique states are explored, hash sets cut repeated lookup cost, fixed parallelism changes constants rather than class, and presorting trades upfront work for cheaper repeated queries.", + "tags": [ + "memoization", + "hash-sets", + "preprocessing", + "parallelism" + ], + "references": [ + "Advanced Concepts", + "Real-World Applications" + ] + }, + { + "id": "bigo-q21", + "type": "single-choice", + "difficulty": "expert", + "scenario": "In a design review, one engineer proves a merge routine can never do better than c1 n log n comparisons in the worst case, and another proves it never exceeds c2 n log n.", + "prompt": "Which notation best summarizes the routine's tight asymptotic growth?", + "options": [ + "O(n log n)", + "Omega(n log n)", + "Theta(n log n)", 
+ "O(n^2)" + ], + "correctAnswer": 2, + "explanation": "Theta notation is used when matching asymptotic upper and lower bounds are both established. Having O(n log n) and Omega(n log n) means the tight class is Theta(n log n).", + "tags": [ + "theta-notation", + "tight-bounds", + "proofs" + ], + "references": [ + "Advanced Concepts", + "Core Concepts" + ] + }, + { + "id": "bigo-q22", + "type": "multi-select", + "difficulty": "expert", + "scenario": "The interactive playground is showing several code patterns, and you need to identify the ones that are logarithmic in n.", + "prompt": "Which patterns have logarithmic time complexity with respect to n?", + "options": [ + "Repeatedly halve n until it becomes 1", + "Binary search on a sorted array", + "Scan an unsorted array to find the maximum", + "Insert one element into a binary heap", + "Run merge sort on the entire array" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Halving loops, binary search, and heap insertion all grow logarithmically with input size. 
A full scan is linear, and merge sort over an entire array is O(n log n).", + "tags": [ + "logarithmic-time", + "interactive-playground", + "patterns" + ], + "references": [ + "Interactive Playground", + "Common Complexities" + ] + }, + { + "id": "bigo-q23", + "type": "ordering", + "difficulty": "expert", + "scenario": "A retrospective is documenting why append-heavy behavior in a dynamic array still scales well in the interactive playground.", + "prompt": "Order the reasoning steps that justify O(1) amortized append time.", + "items": [ + "Conclude the average append cost is O(1) amortized", + "Notice that most appends only write the new element", + "Add in the occasional resize operations that copy existing elements", + "Sum the copied elements across doublings as a geometric series", + "Divide total work by the number of appends" + ], + "correctAnswer": [ + "Notice that most appends only write the new element", + "Add in the occasional resize operations that copy existing elements", + "Sum the copied elements across doublings as a geometric series", + "Divide total work by the number of appends", + "Conclude the average append cost is O(1) amortized" + ], + "explanation": "Most appends are cheap, resizes are occasional, and the copied elements across repeated doublings form a geometric series. 
Dividing total work by the number of appends yields O(1) amortized cost.", + "tags": [ + "amortized-analysis", + "geometric-series", + "dynamic-arrays" + ], + "references": [ + "Advanced Concepts", + "Interactive Playground" + ] + }, + { + "id": "bigo-q24", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A recommendation service is comparing algorithm A with O(n log n) growth to algorithm B with O(n^1.1) growth for very large future datasets.", + "prompt": "Which asymptotic statement is correct?", + "options": [ + "O(n^1.1) eventually grows more slowly than O(n log n)", + "O(n log n) eventually grows more slowly than O(n^1.1)", + "They are asymptotically equivalent", + "The comparison can be decided only by benchmark constants" + ], + "correctAnswer": 1, + "explanation": "n^1.1 equals n multiplied by n^0.1, and n^0.1 eventually outgrows log n. That means n^1.1 ultimately grows faster than n log n, so O(n log n) is asymptotically better.", + "tags": [ + "growth-rates", + "n-log-n", + "polynomials" + ], + "references": [ + "Common Complexities", + "Advanced Concepts" + ] + }, + { + "id": "bigo-q25", + "type": "true-false", + "difficulty": "expert", + "scenario": "An operations team measures runtime at one fixed input size and wants to declare the slower implementation has the worse Big-O class immediately.", + "prompt": "True or false: one benchmark measurement at a single n is enough to determine an algorithm's Big-O class reliably.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "Big-O is about how work grows as input size changes. 
A single benchmark point cannot separate asymptotic behavior from constants, cache effects, or measurement noise.", + "tags": [ + "benchmarking", + "algorithm-analysis", + "measurement" + ], + "references": [ + "Algorithm Analysis", + "Gamification Hub" + ] + }, + { + "id": "bigo-q26", + "type": "matching", + "difficulty": "expert", + "scenario": "A product team is categorizing the dominant complexity of several features before setting performance budgets.", + "prompt": "Match each feature to the most appropriate asymptotic expectation.", + "premises": [ + "Autocomplete walks each character of the query down a trie", + "A billing export visits each invoice exactly once", + "A moderation tool computes pairwise similarity for every pair of n documents", + "A release manager tests every combination of n optional flags" + ], + "responses": [ + "O(k) in the query length", + "O(n)", + "O(n^2)", + "O(2^n)" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Trie lookup depends on query length k, a full export is linear in the number of records, all-pairs comparison is quadratic, and exhaustive feature combinations are exponential.", + "tags": [ + "tries", + "quadratic", + "exponential", + "applications" + ], + "references": [ + "Real-World Applications", + "Common Complexities" + ] + }, + { + "id": "bigo-q27", + "type": "single-choice", + "difficulty": "expert", + "scenario": "During code review, an engineer claims a loop that starts at 1 and doubles i until i >= n should be considered linear because i eventually reaches n.", + "prompt": "If the loop does constant work each iteration, what is the time complexity?", + "options": [ + "O(log n)", + "O(n)", + "O(sqrt n)", + "O(1)" + ], + "correctAnswer": 0, + "explanation": "Doubling i reaches n after about log2 n iterations. 
With constant work per iteration, the total time is O(log n).", + "tags": [ + "logarithmic-time", + "loops", + "code-review" + ], + "references": [ + "Algorithm Analysis", + "Interactive Playground" + ] + }, + { + "id": "bigo-q28", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A team is evaluating whether memoization will meaningfully reduce the runtime of a recursive solver before shipping it.", + "prompt": "Which conditions must hold for memoization to reduce asymptotic work significantly?", + "options": [ + "Many calls revisit the same logical subproblem", + "Each subproblem can be identified with a stable cache key", + "Every recursive branch produces a completely unique state", + "The work per cached state remains manageable once the cache is introduced", + "The cache is cleared before sibling branches can reuse results" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Memoization helps only when subproblems repeat and can be keyed so cached results are reusable. If every state is unique or the cache is cleared before reuse, the asymptotic benefit disappears.", + "tags": [ + "memoization", + "dynamic-programming", + "state-space" + ], + "references": [ + "Advanced Concepts", + "Practice Challenges" + ] + }, + { + "id": "bigo-q29", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A shard reprocessor handles n items in round one, then n/2, then n/4, then n/8, and continues until no items remain.", + "prompt": "What tight bound best matches the total work across all rounds?", + "options": [ + "O(log n)", + "O(n)", + "O(n log n)", + "O(n^2)" + ], + "correctAnswer": 1, + "explanation": "The total work forms the geometric series n + n/2 + n/4 + ..., which is bounded by 2n. 
That makes the tight bound O(n).", + "tags": [ + "geometric-series", + "aggregate-analysis", + "tight-bounds" + ], + "references": [ + "Algorithm Analysis", + "Advanced Concepts" + ] + }, + { + "id": "bigo-q30", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A team rewrites a recursive divide-and-conquer routine iteratively and assumes the asymptotic time class must improve.", + "prompt": "True or false: rewriting a recursive algorithm iteratively automatically improves its asymptotic time complexity.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "Changing recursion to iteration may reduce call-stack usage or constant overhead, but it does not automatically change how total work grows with n.", + "tags": [ + "iteration-vs-recursion", + "time-complexity", + "refactoring" + ], + "references": [ + "Core Concepts", + "Advanced Concepts" + ] + }, + { + "id": "bigo-q31", + "type": "single-choice", + "difficulty": "expert", + "scenario": "The gamification hub shows algorithm X with O(n^2) time beating algorithm Y with O(n log n) on today's benchmark at n = 1000.", + "prompt": "Which conclusion is most defensible?", + "options": [ + "X is asymptotically better because it wins on the current benchmark", + "Y can still be the asymptotically better choice even if X is faster at today's scale", + "Cache effects make asymptotic analysis irrelevant", + "Both algorithms have the same class because both compare elements" + ], + "correctAnswer": 1, + "explanation": "Current benchmark wins do not override asymptotic growth. 
An O(n^2) algorithm can beat O(n log n) at small n, but the lower-growth algorithm is usually the safer long-run scaling choice.", + "tags": [ + "benchmarking", + "gamification", + "scaling-decisions" + ], + "references": [ + "Gamification Hub", + "Real-World Applications" + ] + } + ] +} \ No newline at end of file diff --git a/quiz-banks/datastructures.quiz.json b/quiz-banks/datastructures.quiz.json new file mode 100644 index 0000000..5ae6f31 --- /dev/null +++ b/quiz-banks/datastructures.quiz.json @@ -0,0 +1,530 @@ +{ + "moduleId": "datastructures", + "moduleTitle": "Data Structures", + "description": "Scenario-driven assessment of advanced data structure selection, invariants, and performance tradeoffs in real systems.", + "version": 1, + "timeLimitMinutes": 10, + "questionsPerAttempt": 5, + "questions": [ + { + "id": "datastructures-q01", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A distributed job runner keeps receiving new tasks with deadlines, and the scheduler repeatedly needs the next task with the smallest deadline while inserts continue throughout the day.", + "prompt": "Which core structure is the best fit for this workload if ordered iteration of all tasks is not required?", + "explanation": "A binary min-heap is designed for repeated insert plus extract-min operations, both in O(log n), with O(1) access to the current minimum. 
Track size or reserve one empty slot so full and empty states are distinguishable
the rotation restores that subtree's pre-insertion height, so no ancestor above it can become unbalanced.
During an extract-min operation, the root has already been chosen for removal and the team wants the exact repair sequence reviewed.", + "prompt": "Order the heap-repair steps after removing the root element.", + "explanation": "The standard extract-min repair moves the last element to the root, shrinks the heap, then repeatedly compares with the smaller child and swaps downward until the heap property holds again.", + "tags": ["heaps", "extract-min", "ordering", "repair"], + "references": ["Heaps", "Practice Problems"], + "items": [ + "Repeat the comparison-and-swap process until the heap property holds", + "Move the last element into the root position", + "Compare the current node with its smaller child", + "Reduce the heap size by one", + "Swap with the smaller child if the heap property is violated" + ], + "correctAnswer": [ + "Move the last element into the root position", + "Reduce the heap size by one", + "Compare the current node with its smaller child", + "Swap with the smaller child if the heap property is violated", + "Repeat the comparison-and-swap process until the heap property holds" + ] + }, + { + "id": "datastructures-q06", + "type": "matching", + "difficulty": "advanced", + "scenario": "An architecture review is mapping subsystems to the simplest data structure that matches their dominant access pattern.", + "prompt": "Match each subsystem to the most appropriate primary data structure.", + "explanation": "Back and undo navigation is naturally LIFO, print spooling is FIFO, a live leaderboard needs priority access, and LRU ordering relies on constant-time node movement in a doubly linked list.", + "tags": ["matching", "real-world", "access-patterns", "selection"], + "references": ["Stacks", "Queues", "Heaps", "Linked Lists", "Real-World Applications"], + "premises": [ + "Browser back navigation history", + "Print spooler processing jobs in submission order", + "Real-time leaderboard that must expose the top score quickly", + "LRU cache eviction 
order maintenance" + ], + "responses": ["Stack", "Queue", "Max-heap", "Doubly linked list"], + "correctAnswer": [0, 1, 2, 3] + }, + { + "id": "datastructures-q07", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A storage engine is designing an on-disk index. The team wants high fan-out so tree height stays low and range scans touch as few disk pages as possible.", + "prompt": "Which structure is the best fit?", + "explanation": "B-trees are optimized for external storage by storing many keys per node, reducing height and the number of block reads. AVL trees and heaps are not designed around page-oriented fan-out.", + "tags": ["b-trees", "disk-index", "range-scan", "storage"], + "references": ["B-Trees", "Tree Structures", "Real-World Applications"], + "options": ["AVL tree", "B-tree", "Binary heap", "Hash set"], + "correctAnswer": 1 + }, + { + "id": "datastructures-q08", + "type": "multi-select", + "difficulty": "expert", + "scenario": "You are implementing an O(1) LRU cache for API responses and need to verify the design before coding.", + "prompt": "Which components or behaviors are required for the classic constant-time LRU approach?", + "explanation": "The standard LRU design combines a hash table for direct key lookup with a doubly linked list for recency order, and it moves touched nodes in O(1). 
A BST is unnecessary, and full compaction on hits would destroy the required performance.", + "tags": ["lru-cache", "hash-tables", "linked-lists", "design"], + "references": ["Hash Tables", "Linked Lists", "Real-World Applications"], + "options": [ + "A hash table from key to linked-list node", + "A doubly linked list that stores recency order", + "A binary search tree sorted by key", + "An O(1) operation that moves an accessed node to the most-recent position", + "A full compaction pass after every cache hit" + ], + "correctAnswer": [0, 1, 3] + }, + { + "id": "datastructures-q09", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A compiler front-end stores tokens for a parser that frequently peeks several positions ahead and jumps to arbitrary token indices during error recovery.", + "prompt": "Which representation best supports that access pattern?", + "explanation": "A resizable array gives O(1) indexing and strong cache locality, which is exactly what indexed parser lookahead benefits from. Linked lists are poor for arbitrary indexed access.", + "tags": ["arrays", "linked-lists", "parsing", "locality"], + "references": ["Arrays", "Linked Lists", "Strings", "Real-World Applications"], + "options": [ + "A singly linked list, because random access remains constant time", + "A resizable array, because it supports indexed access efficiently", + "A queue, because parsing is only FIFO consumption", + "A hash set, because token positions are unique" + ], + "correctAnswer": 1 + }, + { + "id": "datastructures-q10", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A stream-processing service uses a monotonic deque to compute sliding-window maxima over a very long metric series.", + "prompt": "True or false: when a new value enters the window, removing smaller values from the back of the deque preserves correctness and keeps update cost amortized O(1).", + "explanation": "A monotonic deque maintains candidates in decreasing order. 
Any smaller value behind a newly inserted larger value can never become the maximum before the newer value leaves the window, so removing it is both correct and efficient.", + "tags": ["deques", "sliding-window", "amortized-analysis", "streaming"], + "references": ["Deques", "Queues", "Complexity Analysis"], + "options": ["False", "True"], + "correctAnswer": 1 + }, + { + "id": "datastructures-q11", + "type": "single-choice", + "difficulty": "intermediate", + "scenario": "A recommendation system receives two massive campaign exports of viewer IDs and needs fast deduplication plus repeated intersection checks between the two populations.", + "prompt": "Which structure is the best first choice for each campaign's collection?", + "explanation": "A hash set is built around membership and uniqueness, making deduplication and set operations such as intersection straightforward and efficient when element order is irrelevant.", + "tags": ["sets", "membership", "deduplication", "analytics"], + "references": ["Sets", "Hash Tables", "Real-World Applications"], + "options": [ + "Hash set of viewer IDs", + "Queue of viewer IDs in arrival order", + "Stack of viewer IDs", + "Singly linked list without membership index" + ], + "correctAnswer": 0 + }, + { + "id": "datastructures-q12", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A teammate claims a red-black tree fix-up is complete after insertion because the tree still looks roughly balanced by eye.", + "prompt": "Which properties must hold after the insertion repair is actually correct?", + "explanation": "Red-black trees require uniform black-height across root-to-leaf paths, no red node with a red child, and preservation of BST in-order ordering. 
The root is not required to be red, and leaves are not stored as contiguous array positions.", + "tags": ["red-black-trees", "invariants", "bst", "rebalancing"], + "references": ["Red-Black Trees", "Binary Search Trees", "Tree Structures"], + "options": [ + "Every path from a node to descendant NIL leaves has the same number of black nodes", + "The root must be red to simplify future insertions", + "No red node has a red child", + "An in-order traversal still yields keys in sorted order", + "All leaves must occupy contiguous array slots" + ], + "correctAnswer": [0, 2, 3] + }, + { + "id": "datastructures-q13", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A job scheduler must support insert, extract-highest-priority, and priority updates for arbitrary job IDs that may already be in the structure.", + "prompt": "Which design best supports all three operations efficiently?", + "explanation": "A heap provides efficient priority ordering, and a hash map from job ID to heap index lets the scheduler locate any existing job before bubbling it up or down. 
Without the index map, arbitrary updates are awkward.", + "tags": ["heaps", "hash-tables", "priority-update", "scheduler"], + "references": ["Heaps", "Hash Tables", "Real-World Applications"], + "options": [ + "Binary heap plus a hash map from job ID to current heap index", + "Plain FIFO queue", + "Sorted singly linked list", + "B-tree keyed only by insertion time" + ], + "correctAnswer": 0 + }, + { + "id": "datastructures-q14", + "type": "matching", + "difficulty": "expert", + "scenario": "A platform team is pairing graph problems with the algorithm that should be applied first during incident-response tooling design.", + "prompt": "Match each graph problem to the most appropriate algorithm.", + "explanation": "Unweighted shortest-hop paths use BFS, nonnegative weighted shortest paths use Dijkstra, minimum-cost network connection uses Prim, and recursive full exploration from a start node is naturally DFS.", + "tags": ["graphs", "algorithms", "matching", "optimization"], + "references": [ + "Shortest Path", + "Minimum Spanning Tree", + "Graph Traversal", + "Graph Structures" + ], + "premises": [ + "Compute the fewest hops between two services in an unweighted dependency graph", + "Compute the least-latency route when edge weights are nonnegative", + "Connect all offices with minimum total cable cost", + "Recursively explore an entire reachable component from a root node" + ], + "responses": [ + "Breadth-first search", + "Dijkstra's algorithm", + "Prim's algorithm", + "Depth-first search" + ], + "correctAnswer": [0, 1, 2, 3] + }, + { + "id": "datastructures-q15", + "type": "ordering", + "difficulty": "advanced", + "scenario": "A service graph has edges A-B, A-C, B-D, C-E, and D-F. 
During a breadth-first traversal from A, neighbors are enqueued alphabetically.", + "prompt": "Order the nodes by the sequence in which BFS dequeues and visits them.", + "explanation": "BFS visits level by level: first A, then its neighbors B and C, then D and E from the next frontier, and finally F from D.", + "tags": ["bfs", "graph-traversal", "ordering", "practice"], + "references": ["Graph Traversal", "Graph Overview", "Practice Problems"], + "items": ["E", "B", "A", "F", "D", "C"], + "correctAnswer": ["A", "B", "C", "D", "E", "F"] + }, + { + "id": "datastructures-q16", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A public API is under a deliberate hash-collision attack, and the engineering team wants a mitigation that improves the worst-case complexity of overloaded buckets.", + "prompt": "Which strategy changes bucket lookup from linear time toward logarithmic time once collisions become extreme?", + "explanation": "Treeifying highly-collided buckets with a balanced tree such as a red-black tree changes operations inside those buckets from O(k) to O(log k). 
Lowering load factor helps average behavior but does not solve adversarial worst cases by itself.", + "tags": ["hash-tables", "security", "collisions", "red-black-trees"], + "references": ["Hash Tables", "Red-Black Trees", "Complexity Analysis"], + "options": [ + "Switch to linear probing only", + "Use separate chaining and convert overloaded buckets to balanced trees", + "Lower the load factor target but keep linked-list buckets forever", + "Replace the table with a queue" + ], + "correctAnswer": 1 + }, + { + "id": "datastructures-q17", + "type": "multi-select", + "difficulty": "advanced", + "scenario": "An embedded runtime is considering a singly linked list instead of a dynamic array for a task list because memory usage patterns are irregular.", + "prompt": "Which tradeoffs are genuinely true for the singly linked list choice?", + "explanation": "Linked lists support O(1) head insertion, but they add pointer overhead and lose the spatial locality of arrays. Random access is not constant time, and tail append is not always O(1) unless a tail pointer is maintained.", + "tags": ["linked-lists", "arrays", "tradeoffs", "memory"], + "references": ["Linked Lists", "Arrays", "Complexity Analysis"], + "options": [ + "Inserting at the head can be O(1) without shifting existing elements", + "Random access to the kth element remains O(1)", + "Each node usually carries pointer overhead in addition to payload data", + "Traversal is typically less cache-friendly than an array", + "Appending to the tail is always O(1) even without a tail pointer" + ], + "correctAnswer": [0, 2, 3] + }, + { + "id": "datastructures-q18", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A database engineer is reviewing page-split logic for B-tree insertion after a node becomes full.", + "prompt": "Statement check: when a full internal B-tree node splits, the median key is promoted to the parent so the resulting child pages still cover sorted key ranges.", + "explanation": "That is 
exactly how B-tree splitting preserves sorted partitioning and keeps the tree balanced. Promoting the median divides the key space cleanly between the two children.", + "tags": ["b-trees", "page-split", "storage", "invariants"], + "references": ["B-Trees", "Tree Structures"], + "options": ["Statement is true", "Statement is false"], + "correctAnswer": 0 + }, + { + "id": "datastructures-q19", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A memory allocator maintains a free-list of blocks and frequently removes a known block from the middle once a pointer to its node is already available.", + "prompt": "Which structure most directly supports O(1) splicing for that removal pattern?", + "explanation": "A doubly linked list can remove a known node in O(1) by relinking predecessor and successor pointers. Arrays and queues require shifting or do not target middle-node splicing.", + "tags": ["linked-lists", "splicing", "allocator", "mutation"], + "references": ["Linked Lists", "Real-World Applications", "Linear Structures"], + "options": ["Dynamic array", "Doubly linked list", "Binary heap", "Circular queue"], + "correctAnswer": 1 + }, + { + "id": "datastructures-q20", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A simulation uses a dense graph of 50,000 entities where nearly every entity can interact with most others, and the hottest operation is checking whether an edge already exists.", + "prompt": "Which representation makes direct edge-existence checks simplest, despite its memory cost?", + "explanation": "For dense graphs, an adjacency matrix provides O(1) edge existence checks by indexing directly into the matrix. 
Its memory cost is high, but in dense cases the tradeoff can be justified.", + "tags": ["graphs", "adjacency-matrix", "dense-graph", "lookup"], + "references": ["Graph Representation", "Graph Types", "Complexity Analysis"], + "options": [ + "Adjacency list", + "Adjacency matrix", + "Singly linked list per vertex", + "Binary search tree of vertices" + ], + "correctAnswer": 1 + }, + { + "id": "datastructures-q21", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A performance postmortem is analyzing why an array-backed event buffer showed occasional latency spikes even though append was documented as amortized O(1).", + "prompt": "Which events can legitimately produce an O(n) cost in that design?", + "explanation": "Dynamic arrays can incur linear work when growth or aggressive shrinking copies many elements, and front insertions require shifting existing values. Simple reads or appends while spare capacity remains do not trigger that spike.", + "tags": ["arrays", "amortized-analysis", "latency", "resizing"], + "references": ["Arrays", "Complexity Analysis", "Linear Structures"], + "options": [ + "A growth resize that copies all current elements into a larger buffer", + "Inserting at index 0 and shifting existing elements right", + "Reading the last element by index", + "Appending while spare capacity already exists", + "Shrinking aggressively after many pops and copying elements into a smaller buffer" + ], + "correctAnswer": [0, 1, 4] + }, + { + "id": "datastructures-q22", + "type": "single-choice", + "difficulty": "intermediate", + "scenario": "A messaging appliance has a fixed memory budget and needs a bounded queue where producers enqueue at one end and consumers dequeue at the other without any runtime allocation.", + "prompt": "Which implementation is the most appropriate starting point?", + "explanation": "A circular array queue matches a bounded FIFO buffer well because it uses fixed storage, supports wraparound, and avoids heap allocation 
after initialization.", + "tags": ["queues", "bounded-buffer", "arrays", "systems"], + "references": ["Queues", "Arrays", "Real-World Applications"], + "options": [ + "Fixed-size circular array queue", + "AVL tree", + "Hash table", + "Singly linked list with no tail pointer" + ], + "correctAnswer": 0 + }, + { + "id": "datastructures-q23", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A work-stealing task scheduler lets each worker push and pop local tasks from one end, while idle workers steal from the opposite end.", + "prompt": "Which structure best matches this access pattern?", + "explanation": "A deque is explicitly built for efficient insertion and removal at both ends, which is why it appears in work-stealing scheduler designs.", + "tags": ["deques", "work-stealing", "scheduler", "concurrency"], + "references": ["Deques", "Real-World Applications"], + "options": ["Deque", "Stack", "Binary search tree", "Set"], + "correctAnswer": 0 + }, + { + "id": "datastructures-q24", + "type": "multi-select", + "difficulty": "advanced", + "scenario": "A team is deciding whether to store session IDs in a hash table or a balanced binary search tree.", + "prompt": "Which requirements specifically favor the balanced binary search tree option?", + "explanation": "Balanced BSTs are the right choice when ordered iteration, range queries, or guaranteed O(log n) worst-case lookup matter. 
Hash tables dominate when only average-case exact-match membership is needed.", + "tags": ["bst", "hash-tables", "tradeoffs", "ordered-data"], + "references": ["Binary Search Trees", "Hash Tables", "Complexity Analysis"], + "options": [ + "The system needs ordered iteration by session key", + "The system needs guaranteed O(log n) worst-case lookup without relying on hash quality", + "The system only needs average O(1) exact-match membership checks and no ordering", + "The system needs range queries such as all keys between K1 and K2", + "The system never iterates and only does exact-key lookups" + ], + "correctAnswer": [0, 1, 3] + }, + { + "id": "datastructures-q25", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A timer service must return the next expiring timer quickly, while also supporting cancellation and rescheduling by timer ID.", + "prompt": "Which design is the strongest fit?", + "explanation": "A min-heap gives fast access to the next expiration, and a hash table from timer ID to heap slot allows efficient cancellation or rescheduling of arbitrary existing timers.", + "tags": ["heaps", "timers", "hash-tables", "systems"], + "references": ["Heaps", "Hash Tables", "Real-World Applications"], + "options": [ + "Unsorted array only", + "Min-heap combined with a hash table from timer ID to heap slot", + "Singly linked list", + "Hash set only" + ], + "correctAnswer": 1 + }, + { + "id": "datastructures-q26", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A service that models partner relationships as next pointers occasionally loops forever during traversal, suggesting a cycle in the linked structure.", + "prompt": "Which technique detects the cycle without extra memory proportional to the list length?", + "explanation": "Floyd's tortoise-and-hare algorithm uses two pointers moving at different speeds. 
If a cycle exists, the fast pointer eventually meets the slow pointer without needing a visited set.", + "tags": ["linked-lists", "cycle-detection", "practice", "pointers"], + "references": ["Linked Lists", "Practice Problems"], + "options": [ + "Maintain an adjacency matrix", + "Use Floyd's tortoise-and-hare traversal", + "Rebalance the structure with AVL rotations", + "Sort the nodes by memory address first" + ], + "correctAnswer": 1 + }, + { + "id": "datastructures-q27", + "type": "true-false", + "difficulty": "advanced", + "scenario": "An ETL pipeline inserts already sorted keys into a plain binary search tree implementation that performs no self-balancing.", + "prompt": "Evaluate the statement: in this situation, search and insertion can degrade from O(log n) expected behavior to O(n) because the tree may collapse into a chain.", + "explanation": "That statement is true. Sorted insertion order into a naive BST creates a highly skewed tree, and operations follow a path whose length approaches n.", + "tags": ["bst", "degeneration", "complexity", "worst-case"], + "references": ["Binary Search Trees", "Complexity Analysis"], + "options": ["The statement is false", "The statement is true"], + "correctAnswer": 1 + }, + { + "id": "datastructures-q28", + "type": "single-choice", + "difficulty": "expert", + "scenario": "An incident tool needs the minimum-hop path between two services in a huge unweighted dependency graph that already fits in memory.", + "prompt": "Which algorithm is the correct and simplest first choice?", + "explanation": "Breadth-first search explores nodes by increasing distance in an unweighted graph, so the first time it reaches the destination it has found a minimum-hop path.", + "tags": ["graphs", "bfs", "shortest-path", "incident-response"], + "references": ["Shortest Path", "Graph Traversal", "Graph Structures"], + "options": [ + "Depth-first search with recursion", + "Breadth-first search", + "Prim's algorithm", + "Bellman-Ford" + ], + 
"correctAnswer": 1 + }, + { + "id": "datastructures-q29", + "type": "matching", + "difficulty": "advanced", + "scenario": "A database engineer is comparing several tree families and wants to align each one with the property that most clearly defines it.", + "prompt": "Match each structure to its defining property.", + "explanation": "AVL trees enforce tight balance by tracking height differences, red-black trees use coloring and black-height rules, heaps care about parent-child priority in a complete tree, and B-trees are multiway trees optimized for storage blocks.", + "tags": ["trees", "matching", "invariants", "comparison"], + "references": ["AVL Trees", "Red-Black Trees", "Heaps", "B-Trees"], + "premises": ["AVL tree", "Red-black tree", "Binary heap", "B-tree"], + "responses": [ + "Height is kept tightly balanced using balance factors", + "Color rules keep black-height uniform across root-to-leaf paths", + "A complete tree maintains parent ordering relative to children", + "A multiway node layout is optimized for block-based storage" + ], + "correctAnswer": [0, 1, 2, 3] + }, + { + "id": "datastructures-q30", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A design review document makes several claims about asymptotic behavior across common data structures, and you need to approve only the accurate ones.", + "prompt": "Select all correct claims.", + "explanation": "Average O(1) in hash tables does not imply worst-case O(1), heaps expose their extremum in O(1) but do not support efficient arbitrary search, and balanced BSTs can traverse all keys in sorted order in O(n) total time.", + "tags": ["complexity", "analysis", "tradeoffs", "review"], + "references": [ + "Complexity Analysis", + "Hash Tables", + "Heaps", + "Binary Search Trees", + "Graph Representation" + ], + "options": [ + "A hash table's average O(1) lookup does not guarantee O(1) worst-case under severe collisions", + "A binary heap can return the minimum in O(1), but arbitrary 
search is not generally O(log n)", + "An adjacency matrix stores only existing edges, so it is optimal for sparse graphs", + "A balanced BST can output all keys in sorted order in O(n) total time", + "A queue cannot be implemented with linked lists" + ], + "correctAnswer": [0, 1, 3] + }, + { + "id": "datastructures-q31", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A 3D editor stores scene objects with parent-child transforms so that moving a parent object should update an entire hierarchy of descendants coherently.", + "prompt": "Which abstraction best models this hierarchy?", + "explanation": "A tree captures parent-child structure directly, making subtree traversal and hierarchical transform propagation natural. The other options discard or flatten the relationships the editor depends on.", + "tags": ["trees", "scene-graph", "3d", "hierarchy"], + "references": ["Tree Structures", "3D Visualizations", "Real-World Applications"], + "options": [ + "Tree with parent-child links", + "Hash table with no edges", + "Queue ordered by creation time", + "Flat set of unrelated nodes" + ], + "correctAnswer": 0 + } + ] +} diff --git a/quiz-banks/devops.quiz.json b/quiz-banks/devops.quiz.json new file mode 100644 index 0000000..c0a4dee --- /dev/null +++ b/quiz-banks/devops.quiz.json @@ -0,0 +1,833 @@ +{ + "moduleId": "devops", + "moduleTitle": "DevOps", + "description": "Scenario-based assessment focused on advanced DevOps delivery, cloud operating models, container orchestration, infrastructure as code, and production observability.", + "version": 1, + "timeLimitMinutes": 10, + "questionsPerAttempt": 5, + "questions": [ + { + "id": "devops-q01", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A retail platform ships features from one team while a separate operations group owns uptime. 
Every Friday release turns into a blame-heavy handoff, and emergency changes bypass normal review because each side is optimizing different KPIs.", + "prompt": "Which structural change best addresses the root cause rather than only speeding up the handoff?", + "options": [ + "Create a cross-functional service team with shared delivery and reliability metrics", + "Add a mandatory change advisory board meeting before every production deployment", + "Move all production access from developers to operations permanently", + "Require releases to happen only once per month" + ], + "correctAnswer": 0, + "explanation": "The failure mode is misaligned incentives and ownership. A cross-functional team with shared delivery and reliability goals removes the wall of confusion, while extra approvals, stricter separation, or less frequent releases preserve the same siloed structure.", + "tags": [ + "devops-culture", + "shared-ownership", + "team-topologies" + ], + "references": [ + "Introduction", + "Modern Dev Roles" + ] + }, + { + "id": "devops-q02", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A payment service compiles successfully on every commit, but the team still sees production-only breakages caused by schema drift, unsigned artifacts, and last-minute environment differences.", + "prompt": "Which controls belong in the delivery pipeline before production promotion?", + "options": [ + "Run integration and contract tests against a production-like environment", + "Validate database migrations in a rehearsal step before deploy", + "Allow engineers to rebuild the artifact on the target server so fixes can be applied quickly", + "Sign or otherwise attest the artifact that was tested", + "Rely on manual smoke testing in production as the first full-system check" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Prod-like validation, migration rehearsal, and artifact integrity checks reduce the exact failure modes described. 
Rebuilding on target servers and treating production as the first real integration environment destroy reproducibility.", + "tags": [ + "ci-cd", + "artifact-integrity", + "release-safety", + "database-migrations" + ], + "references": [ + "CI/CD Pipeline", + "Infrastructure as Code" + ] + }, + { + "id": "devops-q03", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A team claims it already practices continuous deployment because every merge to main creates a release candidate and requires one human approval before production.", + "prompt": "True or false: this workflow is continuous deployment rather than continuous delivery.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "A human approval gate means the system is continuously delivering a production-ready artifact, not continuously deploying every eligible change automatically.", + "tags": [ + "continuous-delivery", + "continuous-deployment", + "release-governance" + ], + "references": [ + "CI/CD Pipeline", + "Introduction" + ] + }, + { + "id": "devops-q04", + "type": "ordering", + "difficulty": "expert", + "scenario": "An organization is redesigning its release process so the same tested artifact moves progressively from commit to production with rollback gates.", + "prompt": "Arrange the pipeline stages from earliest to latest.", + "items": [ + "Build an immutable artifact from the merged commit", + "Execute automated tests and security checks on that artifact", + "Deploy the artifact to a production-like pre-release environment", + "Roll out the exact artifact progressively to production", + "Evaluate live health signals and trigger rollback if thresholds are breached" + ], + "correctAnswer": [ + "Build an immutable artifact from the merged commit", + "Execute automated tests and security checks on that artifact", + "Deploy the artifact to a production-like pre-release environment", + "Roll out the exact artifact progressively to production", + "Evaluate live 
health signals and trigger rollback if thresholds are breached" + ], + "explanation": "A trustworthy pipeline builds once, validates that exact artifact, rehearses it in a prod-like environment, then promotes it progressively while observing health and enforcing rollback thresholds.", + "tags": [ + "progressive-delivery", + "artifact-promotion", + "rollback" + ], + "references": [ + "CI/CD Pipeline", + "Observability" + ] + }, + { + "id": "devops-q05", + "type": "matching", + "difficulty": "expert", + "scenario": "An enterprise is deciding which cloud service model fits four very different workloads.", + "prompt": "Match each workload to the most appropriate cloud service model.", + "premises": [ + "A team needs full operating-system control to run a legacy application that depends on custom kernel modules", + "Developers want to deploy web code without managing servers or patching the runtime", + "The company wants a fully managed collaboration suite for email and documents", + "A bursty image-resizing step should execute only when files land in object storage" + ], + "responses": [ + "Infrastructure as a Service (IaaS)", + "Platform as a Service (PaaS)", + "Software as a Service (SaaS)", + "Function as a Service (FaaS)" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "IaaS fits workloads needing operating-system control, PaaS removes server management for application deployment, SaaS provides a finished application, and FaaS is best for event-driven code that runs only when triggered.", + "tags": [ + "cloud-service-models", + "shared-responsibility", + "workload-fit" + ], + "references": [ + "Cloud Service Models", + "Cloud Architecture" + ] + }, + { + "id": "devops-q06", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A team runs workloads on a managed Kubernetes service. 
They assume the provider is responsible for all security because the control plane is managed.", + "prompt": "Which responsibility still belongs most directly to the customer team?", + "options": [ + "Patching vulnerabilities inside the application containers they build and deploy", + "Designing the cloud provider's hypervisor isolation model", + "Replacing failed disks in the provider's data center", + "Maintaining the Kubernetes API server binaries for every control-plane node" + ], + "correctAnswer": 0, + "explanation": "Managed control planes reduce infrastructure burden, but the customer still owns the software supply chain and the security of the container images and application logic they ship.", + "tags": [ + "shared-responsibility", + "managed-kubernetes", + "container-security" + ], + "references": [ + "Cloud Service Models", + "Container Orchestration" + ] + }, + { + "id": "devops-q07", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A flash-sale platform must survive sudden regional traffic spikes while keeping catalog reads fast and preventing single-instance hotspots.", + "prompt": "Which architectural choices are most aligned with that goal?", + "options": [ + "Distribute traffic across healthy instances with load balancing", + "Use a cache-aside layer for high-read catalog data", + "Keep one stateful application server as the only write and read path for all traffic", + "Design stateless services so replicas can scale horizontally", + "Force every large file upload to pass through the monolith before reaching object storage" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Load balancing, cache-aside, and stateless horizontal scaling are standard ways to absorb spiky traffic. 
A single hot server or routing all large transfers through the monolith creates bottlenecks.", + "tags": [ + "load-balancing", + "cache-aside", + "horizontal-scaling", + "resilience" + ], + "references": [ + "Cloud Architecture", + "Introduction" + ] + }, + { + "id": "devops-q08", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A browser uploads large media files directly to cloud storage using a short-lived token issued by the backend, reducing load on the origin application.", + "prompt": "Which architecture pattern is being used?", + "options": [ + "Valet Key", + "Blue-green deployment", + "Sidecar injection", + "Canary release" + ], + "correctAnswer": 0, + "explanation": "The backend is granting limited direct access to a specific storage resource with a temporary credential, which is the Valet Key pattern.", + "tags": [ + "valet-key", + "object-storage", + "direct-upload" + ], + "references": [ + "Cloud Architecture", + "Cloud Service Models" + ] + }, + { + "id": "devops-q09", + "type": "true-false", + "difficulty": "expert", + "scenario": "A leadership team breaks a monolith into dozens of services and expects operations to become simpler because each deployable unit is smaller.", + "prompt": "True or false: microservices automatically reduce overall operational complexity.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "Microservices reduce the size of individual deployable units, but they add distributed-system complexity in networking, deployments, observability, data consistency, and ownership boundaries.", + "tags": [ + "microservices", + "distributed-systems", + "operational-complexity" + ], + "references": [ + "Cloud Architecture", + "Modern Dev Roles" + ] + }, + { + "id": "devops-q10", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A managed web platform auto-scales correctly, but each new instance spends several minutes compiling assets during startup, delaying scale-out 
during traffic surges.", + "prompt": "What is the best pipeline-level fix?", + "options": [ + "Move compilation into CI and ship a prebuilt deployable artifact or image", + "Increase instance memory so the runtime compiler can work faster", + "Add more manual approval gates before release", + "Switch from HTTPS to HTTP inside the cluster" + ], + "correctAnswer": 0, + "explanation": "Heavy build work belongs in CI, not in runtime instance startup. Shipping a prebuilt artifact reduces cold-start latency and makes scaling behavior more predictable.", + "tags": [ + "build-vs-runtime", + "autoscaling", + "artifact-promotion" + ], + "references": [ + "CI/CD Pipeline", + "Cloud Service Models" + ] + }, + { + "id": "devops-q11", + "type": "multi-select", + "difficulty": "expert", + "scenario": "During a Kubernetes rolling update, users intermittently receive 502s even though the deployment never fully fails.", + "prompt": "Which configuration problems could plausibly explain the outage symptoms?", + "options": [ + "A readiness check reports success before the application and its dependencies are actually ready", + "An aggressive liveness probe restarts slow-starting containers", + "Resource requests and limits are correctly sized for steady-state traffic", + "The Service selector no longer matches the labels on the new pods", + "A rollout policy allows too many old pods to terminate before enough new pods are ready" + ], + "correctAnswer": [ + 0, + 1, + 3, + 4 + ], + "explanation": "Early readiness, restart loops, selector mismatches, and unsafe rollout parameters can all create intermittent unavailability during a rolling update. 
Properly sized resources alone would not explain those specific symptoms.", + "tags": [ + "kubernetes", + "rolling-updates", + "probes", + "service-routing" + ], + "references": [ + "Container Orchestration", + "Observability" + ] + }, + { + "id": "devops-q12", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A queue-processing deployment scales on CPU, but real user latency is driven by backlog growth. CPU stays low while jobs pile up.", + "prompt": "Which autoscaling signal is most appropriate?", + "options": [ + "A custom metric such as queue depth or lag per replica", + "Container image size in megabytes", + "Node uptime since the last reboot", + "Whether the last deployment used blue-green strategy" + ], + "correctAnswer": 0, + "explanation": "If user impact is caused by backlog rather than CPU saturation, scaling should follow the business-relevant load signal, such as queue depth, lag, or time-to-drain.", + "tags": [ + "autoscaling", + "custom-metrics", + "queue-workers" + ], + "references": [ + "Container Orchestration", + "Observability" + ] + }, + { + "id": "devops-q13", + "type": "matching", + "difficulty": "expert", + "scenario": "A platform team is teaching new engineers how core Kubernetes mechanisms map to day-to-day operational goals.", + "prompt": "Match each operational goal to the Kubernetes feature that best fits it.", + "premises": [ + "Give each replica a stable identity and durable volume for an ordered cluster member", + "Prevent voluntary maintenance from evicting too many replicas at once", + "Keep a pod out of service discovery until it can actually serve traffic", + "Adjust replica count automatically when demand metrics rise" + ], + "responses": [ + "StatefulSet", + "PodDisruptionBudget", + "Readiness probe", + "HorizontalPodAutoscaler" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "StatefulSet handles stable identity and storage, PodDisruptionBudget protects availability during maintenance, 
readiness controls traffic admission, and HPA automates scaling based on metrics.", + "tags": [ + "kubernetes-primitives", + "availability", + "autoscaling" + ], + "references": [ + "Container Orchestration", + "Modern Dev Roles" + ] + }, + { + "id": "devops-q14", + "type": "single-choice", + "difficulty": "expert", + "scenario": "An engineer manually changes production load balancer settings in the cloud console. The next Terraform run proposes replacing or rewriting resources that the team did not expect.", + "prompt": "Which control most directly surfaces this risk before production is touched?", + "options": [ + "Require reviewed Terraform plan output in CI against shared remote state", + "Let each engineer keep a personal local state file for production", + "Apply infrastructure changes only from laptops after a quick chat", + "Disable drift detection because it generates noisy differences" + ], + "correctAnswer": 0, + "explanation": "A reviewed plan against shared state makes drift visible before apply. Local state files and informal laptop workflows hide coordination risk rather than controlling it.", + "tags": [ + "terraform", + "drift-detection", + "change-review" + ], + "references": [ + "Infrastructure as Code", + "Cloud Architecture" + ] + }, + { + "id": "devops-q15", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A team stores production Terraform state on individual laptops because the actual infrastructure is remote anyway.", + "prompt": "True or false: the location of the state file is mostly a convenience issue and has little effect on coordination or safety.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "State location affects locking, concurrency control, drift visibility, auditability, and who can safely coordinate changes. 
Treating it as a local convenience creates real operational risk.", + "tags": [ + "terraform-state", + "coordination", + "iac-safety" + ], + "references": [ + "Infrastructure as Code", + "Modern Dev Roles" + ] + }, + { + "id": "devops-q16", + "type": "ordering", + "difficulty": "expert", + "scenario": "A backend team needs a zero-downtime database change for a busy service where old and new application versions will run side by side during rollout.", + "prompt": "Place the migration steps in the safest order.", + "items": [ + "Add a backward-compatible schema change that old and new code can both tolerate", + "Deploy application code that can read and write both old and new representations", + "Backfill or transform historical data in the background", + "Switch reads and writes fully to the new representation after validation", + "Remove the obsolete schema path in a later cleanup release" + ], + "correctAnswer": [ + "Add a backward-compatible schema change that old and new code can both tolerate", + "Deploy application code that can read and write both old and new representations", + "Backfill or transform historical data in the background", + "Switch reads and writes fully to the new representation after validation", + "Remove the obsolete schema path in a later cleanup release" + ], + "explanation": "The safe pattern is expand, support both versions, migrate data, cut over after validation, then contract later. 
This avoids breaking mixed-version deployments during rollout.", + "tags": [ + "zero-downtime-migrations", + "expand-contract", + "backward-compatibility" + ], + "references": [ + "CI/CD Pipeline", + "Modern Dev Roles" + ] + }, + { + "id": "devops-q17", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A review of an infrastructure repository finds several practices that may undermine reproducibility and safe promotion.", + "prompt": "Which changes would strengthen immutable and reviewable infrastructure delivery?", + "options": [ + "Pin provider and module versions explicitly", + "Apply the exact reviewed plan artifact rather than recalculating a new plan at deploy time", + "Select the base machine image by an unbounded latest wildcard on every apply", + "Commit plaintext secrets to version control so plans are deterministic", + "Keep bootstrap scripts versioned and rendered from source-controlled templates" + ], + "correctAnswer": [ + 0, + 1, + 4 + ], + "explanation": "Pinned versions, reviewed plan artifacts, and versioned bootstrap inputs improve reproducibility. Unbounded latest selectors and plaintext secrets introduce non-determinism and security problems.", + "tags": [ + "immutable-infrastructure", + "terraform", + "reproducibility", + "secrets-management" + ], + "references": [ + "Infrastructure as Code", + "CI/CD Pipeline" + ] + }, + { + "id": "devops-q18", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A company has 80 services with different compliance, test, and rollback needs. 
A central platform team is deciding how much of CI/CD to standardize.", + "prompt": "Which approach best balances consistency with service-level autonomy?", + "options": [ + "Provide a paved-road pipeline template with mandatory core controls and service-specific extensions", + "Force every service to use one identical monolithic pipeline regardless of risk profile", + "Let every team invent tooling and release logic from scratch", + "Eliminate automated pipelines and rely on manual release checklists" + ], + "correctAnswer": 0, + "explanation": "A paved road standardizes the critical controls while still allowing service teams to adapt to their delivery context. Total uniformity or total fragmentation both create avoidable operational cost.", + "tags": [ + "platform-engineering", + "golden-path", + "pipeline-standardization" + ], + "references": [ + "Modern Dev Roles", + "CI/CD Pipeline" + ] + }, + { + "id": "devops-q19", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A frontend platform team moves authentication checks and personalization logic to CDN edge functions close to users.", + "prompt": "What primary benefit are they seeking?", + "options": [ + "Reducing origin round-trips and latency before requests reach the core backend", + "Making relational database joins cheaper", + "Eliminating the need for cache-control headers", + "Guaranteeing that all bugs are caught before deployment" + ], + "correctAnswer": 0, + "explanation": "Edge execution is primarily about handling work near the requester so less traffic travels back to the origin, which improves latency and often reduces backend load.", + "tags": [ + "edge-computing", + "frontend-platform", + "latency" + ], + "references": [ + "Modern Dev Roles", + "Cloud Architecture" + ] + }, + { + "id": "devops-q20", + "type": "matching", + "difficulty": "expert", + "scenario": "During a checkout incident, engineers need different kinds of telemetry to answer different questions quickly.", + 
"prompt": "Match each investigative need to the telemetry signal that fits it best.", + "premises": [ + "Track whether p99 latency is burning the user-facing error budget over time", + "See the causal path of one request across API gateway, payment service, and database", + "Inspect the exact payload and exception context for a failed background job", + "Jump from an anomalous latency point to representative request traces" + ], + "responses": [ + "Metrics", + "Distributed traces", + "Structured logs", + "Metrics with trace exemplars" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Metrics summarize health over time, traces show a single request path across services, logs capture rich event context, and exemplars link aggregated metric points to concrete trace samples.", + "tags": [ + "observability", + "metrics", + "tracing", + "logging" + ], + "references": [ + "Observability", + "Modern Dev Roles" + ] + }, + { + "id": "devops-q21", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A service has almost exhausted its monthly error budget, but product leadership still wants a risky feature release this afternoon.", + "prompt": "Which responses are consistent with SRE-style reliability governance?", + "options": [ + "Pause high-risk releases and prioritize stabilization work", + "Page engineers immediately on every 4xx regardless of user impact", + "Alert on burn rate and other symptoms tied to user-facing objectives", + "Skip the post-incident review because the team already knows the proximate cause", + "Use canary rollout gates that automatically halt or roll back on SLO regression" + ], + "correctAnswer": [ + 0, + 2, + 4 + ], + "explanation": "Error budgets exist to guide release risk, alerting should map to user impact, and rollout automation should enforce objective guardrails. 
Blanket paging and skipped learning loops make reliability worse.", + "tags": [ + "sre", + "error-budgets", + "slo", + "canary" + ], + "references": [ + "Observability", + "CI/CD Pipeline" + ] + }, + { + "id": "devops-q22", + "type": "single-choice", + "difficulty": "expert", + "scenario": "Distributed tracing is sampled at 1%, and the team keeps missing rare failures because the bad requests often fall outside the sample.", + "prompt": "Which change improves insight while keeping telemetry cost under control?", + "options": [ + "Adopt tail-based or policy-based sampling that retains error and high-latency traces", + "Disable tracing entirely and rely only on dashboard averages", + "Sample traces purely at random but lower the rate further", + "Log only stack traces without request correlation identifiers" + ], + "correctAnswer": 0, + "explanation": "Policy-aware sampling preserves the traces that matter most, such as failures and high latency, without paying to retain every successful request at full fidelity.", + "tags": [ + "distributed-tracing", + "sampling", + "telemetry-cost" + ], + "references": [ + "Observability" + ] + }, + { + "id": "devops-q23", + "type": "true-false", + "difficulty": "advanced", + "scenario": "An operations team centralizes logs in a searchable platform and concludes that dedicated metrics are now redundant.", + "prompt": "True or false: centralized logs alone are usually sufficient for low-latency alerting and high-level capacity signals.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "Logs are rich but expensive and noisy for constant aggregation. 
Metrics provide cheap, low-latency summaries that are better suited to alerting, trending, and capacity planning.", + "tags": [ + "logging", + "metrics", + "alerting" + ], + "references": [ + "Observability", + "Introduction" + ] + }, + { + "id": "devops-q24", + "type": "single-choice", + "difficulty": "expert", + "scenario": "In a shared SaaS platform, one noisy enterprise tenant saturates worker pools and degrades latency for everyone else.", + "prompt": "Which control most directly protects other tenants while preserving shared-infrastructure economics?", + "options": [ + "Introduce tenant-aware rate limits and workload isolation boundaries", + "Move every customer to a dedicated on-premises deployment", + "Disable caching so all requests are treated equally", + "Route every request through a single global worker queue with no quotas" + ], + "correctAnswer": 0, + "explanation": "Well-designed multi-tenancy depends on fairness and isolation controls such as quotas, rate limits, or pool partitioning so one tenant cannot consume the shared platform at everyone else's expense.", + "tags": [ + "multi-tenancy", + "rate-limiting", + "fairness", + "isolation" + ], + "references": [ + "Cloud Architecture", + "Observability" + ] + }, + { + "id": "devops-q25", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A deployment job rebuilds a Docker image during release instead of promoting the exact image that passed tests and security scans earlier in CI.", + "prompt": "What is the principal risk in that design?", + "options": [ + "Production may run a different artifact than the one that was validated", + "Horizontal scaling will stop working for the service", + "The cluster control plane will require manual patching", + "Terraform state will become unreadable" + ], + "correctAnswer": 0, + "explanation": "Rebuilding during deployment breaks provenance and reproducibility. 
The artifact in production can diverge from the one that passed validation, which weakens both reliability and supply-chain assurance.", + "tags": [ + "artifact-promotion", + "supply-chain", + "container-images" + ], + "references": [ + "CI/CD Pipeline", + "Container Orchestration" + ] + }, + { + "id": "devops-q26", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A stateful service must stay available while Kubernetes nodes are drained weekly for security patching.", + "prompt": "Which measures improve availability during planned node maintenance?", + "options": [ + "Define a PodDisruptionBudget for the service", + "Run a single replica to simplify quorum decisions", + "Spread replicas across failure domains with anti-affinity or topology rules", + "Use readiness checks that verify the instance can safely rejoin and serve traffic", + "Cordon and drain all nodes at once to shorten the maintenance window" + ], + "correctAnswer": [ + 0, + 2, + 3 + ], + "explanation": "Maintenance safety depends on limiting simultaneous disruption, spreading replicas so one node action cannot remove all healthy instances, and only admitting traffic when a restarted member is truly ready.", + "tags": [ + "poddisruptionbudget", + "anti-affinity", + "node-maintenance", + "stateful-workloads" + ], + "references": [ + "Container Orchestration", + "Cloud Architecture" + ] + }, + { + "id": "devops-q27", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A bursty serverless image processor scales from zero to thousands of invocations and overwhelms a relational database with connection storms.", + "prompt": "Which architectural adjustment best addresses the root cause?", + "options": [ + "Introduce a connection pool or proxy and buffer work so concurrency reaching the database is controlled", + "Increase function timeout so each invocation holds its database connection longer", + "Disable autoscaling for the function platform", + "Move the SQL schema into 
application logs" + ], + "correctAnswer": 0, + "explanation": "The problem is unconstrained fan-out to a database with limited connection capacity. Pooling, proxies, or queue-based smoothing control concurrency without giving up the elasticity of event-driven compute.", + "tags": [ + "serverless", + "database-connections", + "backpressure", + "faas" + ], + "references": [ + "Cloud Architecture", + "Cloud Service Models" + ] + }, + { + "id": "devops-q28", + "type": "true-false", + "difficulty": "advanced", + "scenario": "An executive dashboard shows CPU, memory, and disk utilization for production hosts, and leadership declares the platform fully observable.", + "prompt": "True or false: infrastructure dashboards alone are enough to claim real observability for a distributed product.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "Host dashboards are useful, but observability requires enough correlated service, dependency, and request-level telemetry to explain unknown failure modes in the product itself.", + "tags": [ + "observability", + "service-telemetry", + "distributed-systems" + ], + "references": [ + "Observability", + "Introduction" + ] + }, + { + "id": "devops-q29", + "type": "single-choice", + "difficulty": "expert", + "scenario": "Platform engineers build self-service templates that let product teams create ephemeral environments mirroring production through approved APIs and guardrails.", + "prompt": "Which DevOps capability does this most closely represent?", + "options": [ + "An internal developer platform or paved road built on infrastructure automation", + "A replacement for observability because environments are short-lived", + "A pure SaaS adoption strategy with no infrastructure ownership", + "A manual break-glass process for exceptional outages only" + ], + "correctAnswer": 0, + "explanation": "This is the essence of a platform or paved-road model: standardize safe infrastructure automation so teams can 
self-serve without opening up unsafe snowflake environments.", + "tags": [ + "platform-engineering", + "self-service", + "ephemeral-environments", + "iac" + ], + "references": [ + "Modern Dev Roles", + "Infrastructure as Code" + ] + }, + { + "id": "devops-q30", + "type": "single-choice", + "difficulty": "expert", + "scenario": "After moving to canary deployments, release decisions are still made by humans staring at dashboards for 30 minutes, and different operators make different calls on the same data.", + "prompt": "What is the strongest next improvement?", + "options": [ + "Automate progressive delivery analysis with objective rollback thresholds tied to service health", + "Shorten the observation window to two minutes so humans decide faster", + "Stop collecting metrics during rollout to reduce noise", + "Return to big-bang deployments so success is easier to judge" + ], + "correctAnswer": 0, + "explanation": "Canary delivery becomes far more reliable when promotion and rollback are driven by explicit health analysis instead of inconsistent human interpretation under time pressure.", + "tags": [ + "canary", + "progressive-delivery", + "automated-rollbacks", + "release-analysis" + ], + "references": [ + "CI/CD Pipeline", + "Observability" + ] + }, + { + "id": "devops-q31", + "type": "single-choice", + "difficulty": "expert", + "scenario": "Post-incident reviews show that alerts fire quickly, but recovery is slow because database, application, and infrastructure responsibilities are unclear during active incidents.", + "prompt": "Which change is most likely to reduce mean time to recovery?", + "options": [ + "Define clear service ownership, on-call roles, and tested runbooks for incident response", + "Send every alert to every engineer so someone will pick it up", + "Replace all paging with a daily summary email", + "Increase deployment frequency without changing incident process" + ], + "correctAnswer": 0, + "explanation": "Low MTTR depends on clear ownership, 
explicit escalation paths, and runbooks that have been exercised before the incident. Broadcasting alerts widely without structure usually increases confusion instead of speed.", + "tags": [ + "mttr", + "incident-response", + "ownership", + "runbooks" + ], + "references": [ + "Modern Dev Roles", + "Observability" + ] + } + ] +} \ No newline at end of file diff --git a/quiz-banks/git.quiz.json b/quiz-banks/git.quiz.json new file mode 100644 index 0000000..8615b0c --- /dev/null +++ b/quiz-banks/git.quiz.json @@ -0,0 +1,861 @@ +{ + "moduleId": "git", + "moduleTitle": "Git", + "description": "Scenario-based assessment focused on advanced Git architecture, the three-tree model, object internals, branching strategy, professional collaboration workflows, history manipulation, and recovery decisions.", + "version": 1, + "timeLimitMinutes": 10, + "questionsPerAttempt": 5, + "questions": [ + { + "id": "git-q01", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A developer boards a long flight after cloning the repository the night before. Mid-flight, they review old commits, create a new branch, and make several commits even though the company VPN is unavailable.", + "prompt": "Which Git property best explains why that workflow still works?", + "options": [ + "The working directory automatically mirrors the remote while offline", + "Each clone contains a complete local repository with full history", + "Git stores only the latest snapshot locally and reconstructs older commits from cache", + "The staging area proxies all write operations until the server becomes reachable" + ], + "correctAnswer": 1, + "explanation": "Git is distributed, so every clone contains the full repository history and object database. 
That allows local history inspection, branching, and committing without network access.", + "tags": [ + "distributed-vcs", + "offline-work", + "architecture" + ], + "references": [ + "Introduction", + "Git Architecture" + ] + }, + { + "id": "git-q02", + "type": "multi-select", + "difficulty": "expert", + "scenario": "An engineering manager is comparing Git to a centralized VCS for a globally distributed team that frequently works with unstable connectivity.", + "prompt": "Which capabilities come directly from every clone containing full history?", + "options": [ + "Developers can inspect logs and create commits offline", + "Any clone can serve as another backup copy of project history", + "Pushes can never be rejected because all clones are complete", + "Many branch and history operations run against local data instead of a central server", + "Repository permissions are no longer necessary" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Full local history enables offline commits and log inspection, makes each clone a backup, and keeps many operations local and fast. It does not eliminate conflicts or access control.", + "tags": [ + "distributed-vcs", + "performance", + "collaboration", + "resilience" + ], + "references": [ + "Git Architecture", + "Introduction" + ] + }, + { + "id": "git-q03", + "type": "true-false", + "difficulty": "expert", + "scenario": "You staged only part of a file with interactive staging, then continued editing the same file before committing.", + "prompt": "True or false: the next git commit records the staged snapshot from the index even if the working directory now contains newer unstaged edits.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "git commit records the snapshot currently in the staging area, not whatever happens to be newest in the working directory. 
That separation is the core of the three-tree model.", + "tags": [ + "index", + "partial-staging", + "three-tree-model" + ], + "references": [ + "Three-Tree Model", + "Core Workflow" + ] + }, + { + "id": "git-q04", + "type": "ordering", + "difficulty": "advanced", + "scenario": "A new team member wants the exact sequence of state transitions when a tracked file is edited and then published to the remote repository.", + "prompt": "Arrange the normal publish cycle from earliest to latest.", + "items": [ + "Edit the tracked file in the working directory", + "Stage the selected snapshot in the index with git add", + "Create a commit object from the staged snapshot", + "Move the current branch reference to the new commit", + "Transfer the new commit objects to the remote with git push" + ], + "correctAnswer": [ + "Edit the tracked file in the working directory", + "Stage the selected snapshot in the index with git add", + "Create a commit object from the staged snapshot", + "Move the current branch reference to the new commit", + "Transfer the new commit objects to the remote with git push" + ], + "explanation": "The file is edited in the working directory, copied into the index with git add, committed from the index, and then shared with the remote through push.", + "tags": [ + "workflow", + "index", + "push", + "state-transition" + ], + "references": [ + "Three-Tree Model", + "Core Workflow", + "Git Architecture" + ] + }, + { + "id": "git-q05", + "type": "matching", + "difficulty": "expert", + "scenario": "A tooling team is documenting Git internals before building a repository visualizer.", + "prompt": "Match each Git object to its primary role.", + "premises": [ + "Blob", + "Tree", + "Commit", + "Annotated tag" + ], + "responses": [ + "Stores raw file contents without filename metadata", + "Represents a directory snapshot by pointing to blobs and other trees", + "Records a snapshot root, parent relationships, author data, and a message", + "Names another object and 
can carry release metadata" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Blobs store file content, trees describe directory structure, commits connect snapshots into history, and annotated tags add metadata to a referenced object such as a release commit.", + "tags": [ + "object-model", + "blob", + "tree", + "commit", + "tag" + ], + "references": [ + "Object Model", + "Visualization" + ] + }, + { + "id": "git-q06", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A monorepo contains many identical generated LICENSE files in different directories, yet the object database growth is smaller than expected.", + "prompt": "Which Git behavior best explains that outcome?", + "options": [ + "Trees merge identical filenames into a single directory entry automatically", + "Identical content hashes to the same blob object and can be reused", + "Commits skip duplicated files when they are on different paths", + "Deduplication happens only on the remote during git push" + ], + "correctAnswer": 1, + "explanation": "Git is content-addressable. 
Identical file content produces the same blob hash, so identical blobs can be reused regardless of path names.", + "tags": [ + "content-addressing", + "deduplication", + "blob", + "sha1" + ], + "references": [ + "Object Model", + "Introduction" + ] + }, + { + "id": "git-q07", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A developer made one local commit too early on a private feature branch and wants to choose the correct reset mode without losing track of the effects.", + "prompt": "Which outcomes correctly describe the reset variants covered in the module?", + "options": [ + "git reset --soft HEAD~1 moves branch and HEAD back while leaving the changes staged", + "git reset --mixed HEAD~1 keeps the file modifications but removes them from staging", + "git reset --hard HEAD~1 moves back one commit and discards the corresponding staged and working tree changes from the current checkout", + "git reset --soft HEAD~1 is the preferred fix for a published mistake on shared main", + "git reset --hard HEAD~1 is the safe replacement for revert on shared branches" + ], + "correctAnswer": [ + 0, + 1, + 2 + ], + "explanation": "Soft keeps changes staged, mixed unstages them, and hard discards them from the current checkout. On shared published history, revert is the safer tool.", + "tags": [ + "reset", + "history-management", + "index", + "safety" + ], + "references": [ + "History Management", + "Three-Tree Model" + ] + }, + { + "id": "git-q08", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A teammate runs git branch feature/api-cleanup and starts working, assuming they have already switched branches. 
Later they discover the new commits are still on main.", + "prompt": "What did git branch actually do?", + "options": [ + "Created and checked out feature/api-cleanup in one step", + "Created a new branch pointer at the current commit without switching HEAD", + "Detached HEAD at the current commit", + "Copied the staged snapshot into a new remote branch" + ], + "correctAnswer": 1, + "explanation": "git branch creates the branch reference but does not switch to it. HEAD remains on the current branch until git switch or git checkout is used.", + "tags": [ + "branches", + "head", + "refs" + ], + "references": [ + "Branching & Merging", + "Visualization" + ] + }, + { + "id": "git-q09", + "type": "true-false", + "difficulty": "expert", + "scenario": "Two teammates have already based work on a shared feature branch. You want a cleaner history before release.", + "prompt": "True or false: rebasing that published branch is usually safer for the team than merging main into it because rebase avoids an extra merge commit.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "Rebasing rewrites commit history. Once others have based work on those commits, rewriting the branch can disrupt them. 
Merging is usually the safer option for published shared branches.", + "tags": [ + "rebase", + "merge", + "published-history", + "team-safety" + ], + "references": [ + "Branching & Merging", + "History Management", + "Professional Workflows" + ] + }, + { + "id": "git-q10", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A SaaS team deploys from main several times a day and relies on pull requests to review short-lived feature branches.", + "prompt": "Which workflow from the module best matches that operating model?", + "options": [ + "Git Flow", + "GitHub Flow", + "GitLab Flow", + "Centralized Workflow" + ], + "correctAnswer": 1, + "explanation": "GitHub Flow is optimized for continuous deployment and a stable main branch with short-lived feature branches and pull requests.", + "tags": [ + "github-flow", + "continuous-deployment", + "pull-requests" + ], + "references": [ + "Professional Workflows" + ] + }, + { + "id": "git-q11", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A bad commit landed on shared main and has already been pulled by the entire team. You need to undo it while minimizing disruption.", + "prompt": "Which actions align with the module's safe shared-branch guidance?", + "options": [ + "Create a revert commit that undoes the bad change", + "Use git reset --hard on main and force-push immediately", + "Communicate with the team before any destructive history rewrite", + "Prefer revert over reset for published history", + "Start an interactive rebase on main to remove the bad commit from everyone's history" + ], + "correctAnswer": [ + 0, + 2, + 3 + ], + "explanation": "The module recommends preserving shared published history whenever possible. 
Revert is the safe default, and destructive rewrites require caution and communication.", + "tags": [ + "revert", + "shared-branches", + "history-rewrite", + "team-workflow" + ], + "references": [ + "History Management", + "Professional Workflows", + "Troubleshooting" + ] + }, + { + "id": "git-q12", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "git push origin main is rejected as non-fast-forward because another developer pushed first. There is no merge or rebase already in progress locally.", + "prompt": "What is the best next step?", + "options": [ + "Delete origin/main locally and push again", + "Pull remote changes, resolve any conflicts if needed, then push", + "Run git commit --amend until the hashes match the remote", + "Run git gc and retry the push" + ], + "correctAnswer": 1, + "explanation": "A non-fast-forward rejection means the remote has commits you do not have locally. You need to integrate them first through pull, often with merge or rebase, and then push.", + "tags": [ + "non-fast-forward", + "push", + "remote", + "troubleshooting" + ], + "references": [ + "Troubleshooting", + "Core Workflow" + ] + }, + { + "id": "git-q13", + "type": "ordering", + "difficulty": "advanced", + "scenario": "A merge stops after Git inserts conflict markers into app.js and reports the conflict to the terminal.", + "prompt": "Put the conflict-resolution process in the safest order.", + "items": [ + "Run git status to confirm which files are conflicted", + "Inspect the conflicted file and decide the final content", + "Remove the conflict markers and save the resolved file", + "Stage the resolved file with git add", + "Complete the operation with git commit or git rebase --continue" + ], + "correctAnswer": [ + "Run git status to confirm which files are conflicted", + "Inspect the conflicted file and decide the final content", + "Remove the conflict markers and save the resolved file", + "Stage the resolved file with git add", + "Complete the 
operation with git commit or git rebase --continue" + ], + "explanation": "First confirm the conflicted files, then resolve the file contents, stage the resolution, and finally finish the suspended Git operation.", + "tags": [ + "merge-conflicts", + "rebase-conflicts", + "resolution", + "workflow" + ], + "references": [ + "Troubleshooting", + "Branching & Merging" + ] + }, + { + "id": "git-q14", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "You are teaching interns why git add and git commit affect different internal areas, and you want the most directly relevant demo in the Git module.", + "prompt": "Which visualization should you open first?", + "options": [ + "Git Architecture", + "Three-Tree Model", + "Object Model", + "History Management" + ], + "correctAnswer": 1, + "explanation": "The three-tree visualization is designed to show the relationships among the working directory, staging area, and repository, which is exactly what git add and git commit change.", + "tags": [ + "visualization", + "index", + "working-directory", + "repository" + ], + "references": [ + "Visualization", + "Three-Tree Model" + ] + }, + { + "id": "git-q15", + "type": "matching", + "difficulty": "expert", + "scenario": "A platform organization is standardizing Git workflows across four teams with very different release constraints.", + "prompt": "Match each team situation to the workflow that fits best.", + "premises": [ + "Enterprise product with scheduled releases, hotfixes, and parallel release preparation", + "Web application that deploys directly from main after approved pull requests", + "Service that promotes code through staging and production branches", + "Open-source project receiving contributions from developers without direct push access" + ], + "responses": [ + "Git Flow", + "GitHub Flow", + "GitLab Flow", + "Fork and pull request model" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Git Flow fits structured release trains, 
GitHub Flow fits continuously deployed apps, GitLab Flow matches environment-based promotion, and fork plus pull request workflows fit external contribution models.", + "tags": [ + "workflow-selection", + "git-flow", + "github-flow", + "gitlab-flow", + "fork-pr" + ], + "references": [ + "Professional Workflows", + "Git Architecture" + ] + }, + { + "id": "git-q16", + "type": "single-choice", + "difficulty": "expert", + "scenario": "On feature/auth, you modify src/login.ts and commit once. The file sits under src/ at the repository root, and no other paths changed.", + "prompt": "Which new Git objects are necessarily created for that commit?", + "options": [ + "Only one new commit object, because the old tree can be reused unchanged", + "A new blob, a new tree for src, a new root tree, and a new commit", + "Only a new blob and a new tag", + "A new branch and a new blob, but no new commit until push" + ], + "correctAnswer": 1, + "explanation": "A changed file creates a new blob. Any tree containing that entry must also be recreated, up to the repository root, and the new snapshot is then recorded in a new commit object.", + "tags": [ + "object-model", + "trees", + "commits", + "snapshots" + ], + "references": [ + "Object Model", + "Three-Tree Model" + ] + }, + { + "id": "git-q17", + "type": "multi-select", + "difficulty": "expert", + "scenario": "You are writing an internal recovery runbook for advanced Git users and want each statement to align with the module's guidance.", + "prompt": "Which statements are correct?", + "options": [ + "git revert preserves published history by adding a new inverse commit", + "git reflog can help recover states even after an accidental hard reset", + "Interactive rebase is useful for cleaning up local feature-branch commits before sharing them", + "Cherry-pick changes the original commit on its source branch", + "git reset --hard is the recommended default fix for mistakes on shared branches" + ], + "correctAnswer": [ + 0, + 1, + 2 + 
], + "explanation": "Revert adds new history instead of rewriting old history, reflog records reference movements for recovery, and interactive rebase is appropriate before commits are shared. Cherry-pick copies changes; it does not mutate the source commit.", + "tags": [ + "revert", + "reflog", + "interactive-rebase", + "recovery" + ], + "references": [ + "History Management", + "Troubleshooting" + ] + }, + { + "id": "git-q18", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A remote outage hits during a release freeze, but your laptop still has a current local clone.", + "prompt": "True or false: you can still inspect history, create branches, and make commits locally until connectivity returns.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "Those operations are local because the repository history and objects exist in the clone. Only remote synchronization is blocked by the outage.", + "tags": [ + "offline-work", + "distributed-vcs", + "local-history" + ], + "references": [ + "Git Architecture", + "Introduction" + ] + }, + { + "id": "git-q19", + "type": "single-choice", + "difficulty": "expert", + "scenario": "You stage api.ts, then continue editing api.ts without running git add again before committing.", + "prompt": "After the commit completes, where does the newer unstaged version of api.ts still exist?", + "options": [ + "Only in the working directory", + "Only in the new commit", + "Only in the staging area", + "Nowhere, because git commit consumes every newer edit automatically" + ], + "correctAnswer": 0, + "explanation": "The commit records the staged snapshot. 
Any later unstaged edits remain only in the working directory until staged explicitly.", + "tags": [ + "index", + "working-directory", + "partial-staging", + "commit" + ], + "references": [ + "Three-Tree Model", + "Core Workflow" + ] + }, + { + "id": "git-q20", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A team lead wants to integrate feature/search while preserving the exact branch topology and avoiding history rewrite.", + "prompt": "Which approach best fits that goal?", + "options": [ + "Squash merge", + "Rebase feature/search onto main and fast-forward", + "Merge feature/search with a merge commit", + "Cherry-pick only the last feature commit" + ], + "correctAnswer": 2, + "explanation": "A merge commit preserves both parent histories and the visible branch structure. Rebase rewrites history, squash compresses it, and cherry-pick copies only selected changes.", + "tags": [ + "merge-commit", + "branch-topology", + "integration" + ], + "references": [ + "Branching & Merging", + "Professional Workflows" + ] + }, + { + "id": "git-q21", + "type": "matching", + "difficulty": "expert", + "scenario": "The help desk wants a quick map from common Git failures to the first diagnostic command engineers should try.", + "prompt": "Match each problem to the most useful first command.", + "premises": [ + "You think a lost commit vanished after a reset", + "A file is mysteriously not being tracked and may be ignored", + "Push was rejected as non-fast-forward", + "A merge stopped because of conflicts" + ], + "responses": [ + "git reflog", + "git check-ignore <file>", + "git pull --rebase origin main", + "git status" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "reflog helps recover lost references, check-ignore explains ignore behavior, pull with rebase integrates remote changes after a rejection, and status identifies conflicted files and repository state.", + "tags": [ + "diagnostics", + "reflog", + "check-ignore", + "non-fast-forward", + 
"status" + ], + "references": [ + "Troubleshooting" + ] + }, + { + "id": "git-q22", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A security review asks which Git design decision most directly removes the central server as the only durable copy of repository history.", + "prompt": "Which answer is best?", + "options": [ + "Git stores commit messages in plain text", + "Every clone contains the full repository history", + "Git automatically mirrors all remotes to one another", + "The staging area duplicates each file three times" + ], + "correctAnswer": 1, + "explanation": "Because every clone has the full history, the central server is not the only durable copy. That property is a core advantage of Git's distributed design.", + "tags": [ + "resilience", + "distributed-vcs", + "backup" + ], + "references": [ + "Introduction", + "Git Architecture" + ] + }, + { + "id": "git-q23", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A staff engineer is validating assumptions before building internal tooling that reads raw Git objects.", + "prompt": "Which statements are true about the Git object model described in the module?", + "options": [ + "Blobs store file contents, not filenames", + "Trees reference blobs and other trees to model directory structure", + "Commits point to parent commits and a root tree", + "Annotated tags can point to another object and include metadata", + "Editing an existing object in place preserves its hash" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Git objects are immutable. 
Blobs, trees, commits, and annotated tags all have specific roles, and changing object content would produce a different hash rather than mutating an existing object.", + "tags": [ + "object-model", + "immutability", + "tags", + "trees", + "commits" + ], + "references": [ + "Object Model", + "Visualization" + ] + }, + { + "id": "git-q24", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "After running git reset --hard HEAD~3, a developer realizes the target was wrong and cannot remember the discarded commit hashes.", + "prompt": "Where should they look first for recovery information?", + "options": [ + "git reflog", + "git blame", + "git bisect log", + ".gitignore" + ], + "correctAnswer": 0, + "explanation": "reflog records recent HEAD and reference movements, making it the primary recovery tool after an accidental reset or checkout mistake.", + "tags": [ + "reflog", + "recovery", + "reset", + "history-management" + ], + "references": [ + "History Management", + "Troubleshooting" + ] + }, + { + "id": "git-q25", + "type": "ordering", + "difficulty": "advanced", + "scenario": "Your team follows GitHub Flow for a continuously deployed service and wants the standard sequence documented.", + "prompt": "Order the workflow from branch start to release.", + "items": [ + "Create a feature branch from main", + "Commit work on the feature branch", + "Open a pull request for review", + "Merge approved changes into main", + "Deploy main" + ], + "correctAnswer": [ + "Create a feature branch from main", + "Commit work on the feature branch", + "Open a pull request for review", + "Merge approved changes into main", + "Deploy main" + ], + "explanation": "GitHub Flow centers on short-lived branches, pull requests, merging into a stable main branch, and deploying from main.", + "tags": [ + "github-flow", + "pull-requests", + "deployment", + "workflow" + ], + "references": [ + "Professional Workflows", + "Branching & Merging" + ] + }, + { + "id": "git-q26", + 
"type": "true-false", + "difficulty": "advanced", + "scenario": "A teammate claims git add physically moves a file out of the working directory into the staging area, which is why editors would lose access to it.", + "prompt": "True or false: git add copies the selected file state into the index; it does not remove the file from the working directory.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "git add updates the index with a snapshot of content. The working directory file remains present and editable after staging.", + "tags": [ + "git-add", + "index", + "working-directory", + "three-tree-model" + ], + "references": [ + "Three-Tree Model", + "Core Workflow" + ] + }, + { + "id": "git-q27", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "You try to switch branches, but Git warns that local uncommitted edits would be overwritten. You want a clean temporary shelf so you can return later.", + "prompt": "Which command is the best fit?", + "options": [ + "git stash", + "git tag temp-work", + "git revert HEAD", + "git fetch --all" + ], + "correctAnswer": 0, + "explanation": "git stash is designed to temporarily save uncommitted changes so other operations, such as switching branches, can proceed cleanly.", + "tags": [ + "stash", + "branch-switching", + "workspace", + "troubleshooting" + ], + "references": [ + "Troubleshooting", + "Core Workflow", + "History Management" + ] + }, + { + "id": "git-q28", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A repository keeps hitting painful end-of-sprint merge conflicts across long-lived branches, and the team wants to change its habits.", + "prompt": "Which practices from the module are most likely to reduce that merge pain?", + "options": [ + "Keep branches focused on one feature or fix", + "Integrate with main regularly through merge or rebase", + "Delay synchronization until the branch is very large and nearly finished", + "Pull frequently and 
coordinate with teammates working in the same areas", + "Use one branch for multiple unrelated features to reduce branch count" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Small focused branches, regular integration, and active coordination reduce surprise conflicts. Waiting until the end and combining unrelated work on one branch usually makes conflicts worse.", + "tags": [ + "merge-conflicts", + "branching-strategy", + "integration", + "team-practices" + ], + "references": [ + "Branching & Merging", + "Troubleshooting", + "Professional Workflows" + ] + }, + { + "id": "git-q29", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A faulty merge commit has already reached main. Team policy forbids rewriting published history, but the merge's effect must be removed.", + "prompt": "Which command pattern best fits that requirement?", + "options": [ + "git reset --hard <merge-commit>^ and then force-push", + "git revert -m 1 <merge-commit>", + "git rebase -i <merge-commit>^", + "git cherry-pick -m 1 <merge-commit>" + ], + "correctAnswer": 1, + "explanation": "Reverting the merge creates a new commit that undoes its effect without rewriting published history. 
The -m option tells Git which parent is the mainline.", + "tags": [ + "revert", + "merge-commit", + "published-history", + "mainline-parent" + ], + "references": [ + "History Management", + "Branching & Merging" + ] + }, + { + "id": "git-q30", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "During a workshop, you want to demonstrate how reset, rebase, cherry-pick, and reflog alter the apparent timeline of commits.", + "prompt": "Which visualization from the module is the most relevant starting point?", + "options": [ + "Git Architecture", + "History Management", + "Object Model", + "Professional Workflows" + ], + "correctAnswer": 1, + "explanation": "The history visualization is the one focused on timeline-altering operations such as reset, rebase, cherry-pick, and recovery workflows.", + "tags": [ + "visualization", + "history-management", + "rebase", + "reset", + "cherry-pick" + ], + "references": [ + "Visualization", + "History Management" + ] + }, + { + "id": "git-q31", + "type": "true-false", + "difficulty": "expert", + "scenario": "A release engineer amends a commit message on a local branch and expects the commit hash to remain unchanged because no file contents were edited.", + "prompt": "True or false: changing commit metadata like the message creates a new commit object with a different hash.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "Commit hashes are derived from the full commit object, including metadata such as the message, tree reference, and parent links. 
Changing that metadata creates a different object and therefore a different hash.", + "tags": [ + "commit-hash", + "immutability", + "amend", + "object-model" + ], + "references": [ + "Object Model", + "History Management" + ] + } + ] +} \ No newline at end of file diff --git a/quiz-banks/javascript.quiz.json b/quiz-banks/javascript.quiz.json new file mode 100644 index 0000000..1ce351f --- /dev/null +++ b/quiz-banks/javascript.quiz.json @@ -0,0 +1,555 @@ +{ + "moduleId": "javascript", + "moduleTitle": "JavaScript", + "description": "Scenario-based assessment focused on advanced JavaScript execution semantics, engine behavior, runtime coordination, memory management, and performance tradeoffs.", + "version": 1, + "timeLimitMinutes": 10, + "questionsPerAttempt": 5, + "questions": [ + { + "id": "javascript-q01", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A legacy dashboard registers timer callbacks inside a loop and all callbacks log the same final index after the loop completes.", + "prompt": "Which change most directly fixes the captured-index bug without changing the async scheduling model?", + "options": [ + "Replace var with let for the loop index", + "Wrap each callback in Promise.resolve().then", + "Move the loop body into queueMicrotask", + "Hoist the index variable outside the loop" + ], + "correctAnswer": 0, + "explanation": "let creates a distinct lexical binding for each iteration, so each callback captures its own index value. 
The bug is caused by one shared var binding surviving until the timers run.", + "tags": ["closures", "block-scope", "timers"], + "references": ["Call Stack & Execution", "Event Loop & Coordination"] + }, + { + "id": "javascript-q02", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A hot scoring function was fast in benchmarks, but production traces now show frequent deoptimizations and unstable inline caches in V8.", + "prompt": "Which code patterns are likely contributors to the regression?", + "options": [ + "Objects are created with the same fields but in different property orders before reaching the function", + "A numeric field is sometimes passed as a string on a subset of calls", + "All objects come from one factory with one stable shape", + "A previously existing property is deleted from reused objects", + "The hot path starts using eval to read a property dynamically" + ], + "correctAnswer": [0, 1, 3, 4], + "explanation": "Shape instability, mixed value types, delete, and eval all interfere with the assumptions the optimizer and inline caches prefer. A single factory with one stable shape is the favorable case, not the cause.", + "tags": ["deoptimization", "hidden-classes", "inline-caches", "v8"], + "references": ["JIT Compilation Pipeline", "V8 Runtime Features", "JavaScript Engine"] + }, + { + "id": "javascript-q03", + "type": "true-false", + "difficulty": "expert", + "scenario": "A click handler resolves a Promise and also schedules visual work with requestAnimationFrame. 
The handler finishes quickly.", + "prompt": "True or false: if the current task completes before the next paint opportunity, the Promise reaction will run before that paint.", + "options": ["True", "False"], + "correctAnswer": 0, + "explanation": "Browser microtasks are drained at the microtask checkpoint after the current task, before the browser proceeds to later work such as rendering opportunities.", + "tags": ["microtasks", "rendering", "promises"], + "references": ["Event Loop & Coordination", "Task Queues & Priority", "Web APIs & Platform"] + }, + { + "id": "javascript-q04", + "type": "ordering", + "difficulty": "expert", + "scenario": "You are explaining to a performance team how a hot function moves from source code to highly optimized execution in V8.", + "prompt": "Arrange the stages from earliest to latest.", + "items": [ + "Optimizing compiler emits specialized machine code", + "Source text is parsed into tokens and an AST", + "Interpreter runs generated bytecode and gathers feedback", + "Bytecode is produced from the parsed representation", + "Collected feedback shows stable call patterns and object shapes" + ], + "correctAnswer": [ + "Source text is parsed into tokens and an AST", + "Bytecode is produced from the parsed representation", + "Interpreter runs generated bytecode and gathers feedback", + "Collected feedback shows stable call patterns and object shapes", + "Optimizing compiler emits specialized machine code" + ], + "explanation": "Parsing comes first, then bytecode generation, then interpreted execution collects feedback. 
Once feedback is stable enough, the optimizer can emit specialized machine code.", + "tags": ["parser", "bytecode", "jit", "optimization"], + "references": ["Parser & AST Generation", "JIT Compilation Pipeline", "JavaScript Engine"] + }, + { + "id": "javascript-q05", + "type": "matching", + "difficulty": "expert", + "scenario": "A memory investigation surfaces four symptoms and you want to map each one to the most relevant garbage-collection concept.", + "prompt": "Match each symptom to the best explanation.", + "premises": [ + "Objects that outlive several scavenges are moved into a region collected less frequently", + "A cache should forget entries when the key object has no other strong references", + "Thousands of temporary arrays created during parsing mostly die almost immediately", + "A button removed from the DOM is still reachable from a closure" + ], + "responses": [ + "Young-generation collection handles short-lived objects efficiently", + "Promotion to old space after surviving collection", + "Weak associations avoid keeping keys alive", + "A strong reference is preventing reclamation" + ], + "correctAnswer": [1, 2, 0, 3], + "explanation": "Long-lived objects are promoted, weak associations avoid keeping keys alive, short-lived objects are exactly what generational collection optimizes for, and a closure can keep a detached object strongly reachable.", + "tags": ["gc", "promotion", "weak-references", "retention"], + "references": ["Garbage Collection", "Memory Management", "Memory Leaks"] + }, + { + "id": "javascript-q06", + "type": "single-choice", + "difficulty": "expert", + "scenario": "API clients build response objects inconsistently, and a hot renderer repeatedly reads data.id. 
Perf traces show polymorphic inline cache misses.", + "prompt": "Which refactor most directly restores monomorphic property access?", + "options": [ + "Create all response objects through one factory that initializes properties in the same order", + "Replace dot access with bracket access for every property read", + "Round-trip each object through JSON serialization before rendering", + "Call Object.freeze on the object before every render" + ], + "correctAnswer": 0, + "explanation": "Stable property creation order tends to produce stable hidden classes, which improves inline-cache predictability. The other options do not directly fix shape instability.", + "tags": ["monomorphic", "hidden-classes", "property-access"], + "references": ["V8 Runtime Features", "JIT Compilation Pipeline", "Memory Heap & Objects"] + }, + { + "id": "javascript-q07", + "type": "multi-select", + "difficulty": "advanced", + "scenario": "Inside a browser click handler, your code schedules several follow-up actions and you need to predict which ones run at the microtask checkpoint.", + "prompt": "Which entries are processed as microtasks rather than ordinary later tasks?", + "options": [ + "A Promise.then reaction", + "A queueMicrotask callback", + "A setTimeout callback with zero delay", + "A MessageChannel port message handler", + "A MutationObserver callback" + ], + "correctAnswer": [0, 1, 4], + "explanation": "Promise reactions, queueMicrotask callbacks, and MutationObserver callbacks run during the microtask checkpoint. 
setTimeout and MessageChannel handlers are task-based.", + "tags": ["microtasks", "task-queues", "mutationobserver"], + "references": ["Task Queues & Priority", "Event Loop & Coordination", "Web APIs & Platform"] + }, + { + "id": "javascript-q08", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A recursive serializer crashes with Maximum call stack size exceeded even though the process still has plenty of free heap memory.", + "prompt": "Which resource was exhausted first?", + "options": [ + "The call stack frame budget for nested executions", + "The young-generation allocation limit", + "The inline-cache metadata table", + "The parser token buffer" + ], + "correctAnswer": 0, + "explanation": "This error indicates runaway synchronous nesting exhausted stack space. It is separate from heap exhaustion.", + "tags": ["recursion", "stack-overflow", "execution-context"], + "references": ["Call Stack & Execution", "JavaScript Engine"] + }, + { + "id": "javascript-q09", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A bundler plugin needs to reject invalid source before any runtime semantics like hoisting or temporal dead zones are involved.", + "prompt": "Which stage produces the structure the plugin should inspect?", + "options": [ + "Parser and AST generation", + "Garbage collector mark phase", + "Event loop scheduler", + "Task queue prioritizer" + ], + "correctAnswer": 0, + "explanation": "The parser converts source into an AST that tooling can inspect without executing the program. 
GC and scheduling happen later and serve different purposes.", + "tags": ["ast", "static-analysis", "parser"], + "references": ["Parser & AST Generation", "JavaScript Engine"] + }, + { + "id": "javascript-q10", + "type": "true-false", + "difficulty": "advanced", + "scenario": "An async function does synchronous setup, awaits a Promise, and later continues with more work.", + "prompt": "True or false: the code after await resumes in the same uninterrupted synchronous stack frame without re-entering through the microtask queue.", + "options": ["False", "True"], + "correctAnswer": 0, + "explanation": "After await, the current execution unwinds. The continuation is scheduled to resume later when the awaited Promise settles, typically through the microtask queue.", + "tags": ["async-await", "microtasks", "continuations"], + "references": ["Call Stack & Execution", "Event Loop & Coordination"] + }, + { + "id": "javascript-q11", + "type": "ordering", + "difficulty": "advanced", + "scenario": "A user click triggers a handler that queues both a Promise reaction and a zero-delay timer.", + "prompt": "Put these checkpoints in the order they occur from earliest to latest.", + "items": [ + "The timer callback runs on a later task", + "The click handler body begins running", + "Queued microtasks are drained", + "The handler queues a Promise reaction and a timer, then returns" + ], + "correctAnswer": [ + "The click handler body begins running", + "The handler queues a Promise reaction and a timer, then returns", + "Queued microtasks are drained", + "The timer callback runs on a later task" + ], + "explanation": "The click handler runs in the current task, then microtasks drain before the event loop picks a later task such as the timer callback.", + "tags": ["click-events", "timers", "promise-reactions"], + "references": ["Event Loop & Coordination", "Task Queues & Priority", "JavaScript Runtime"] + }, + { + "id": "javascript-q12", + "type": "matching", + "difficulty": "advanced", 
+ "scenario": "You are reviewing several suspected leak reports from a single-page application.", + "prompt": "Match each code smell to its most likely retention cause.", + "premises": [ + "A closure in a long-lived module keeps an array of request payloads forever", + "A DOM node removed from the page still has a listener registered on a global bus", + "A memoization cache uses plain Map with request objects as keys and never evicts", + "A debugging interval continues appending snapshots to an array after the feature is hidden" + ], + "responses": [ + "Unbounded cache with strong keys", + "Background producer continues creating reachable objects", + "Detached object is still referenced by application code", + "Historical data is retained by a closure with longer lifetime" + ], + "correctAnswer": [3, 2, 0, 1], + "explanation": "Closures can retain historical data, detached nodes leak when application references remain, plain Map can become an unbounded strong-key cache, and an interval can keep producing and retaining new objects.", + "tags": ["leaks", "closures", "detached-dom", "intervals"], + "references": ["Memory Leaks", "Memory Management", "Web APIs & Platform"] + }, + { + "id": "javascript-q13", + "type": "single-choice", + "difficulty": "intermediate", + "scenario": "A library works in browsers but fails in Node because it assumes a certain host feature always exists.", + "prompt": "Which API is browser-specific rather than part of the core language itself?", + "options": ["document.querySelector", "Array.prototype.map", "Promise", "JSON.parse"], + "correctAnswer": 0, + "explanation": "The DOM is a host-provided platform API. 
Array methods, Promise, and JSON are language-level features available across runtimes.", + "tags": ["runtime-vs-language", "dom", "host-apis"], + "references": ["Engine & Runtime Comparison", "Web APIs & Platform", "JavaScript Runtime"] + }, + { + "id": "javascript-q14", + "type": "multi-select", + "difficulty": "advanced", + "scenario": "You need a cache for ephemeral wrapper objects and want the cache design to avoid extending their lifetime unnecessarily.", + "prompt": "Which choices help reduce the risk of turning the cache into a leak?", + "options": [ + "Use a WeakMap keyed by the wrapper objects", + "Keep a backup array containing every key object ever seen", + "Remove any separate strong references when the data is no longer needed", + "Serialize objects into strings and store those strings in a long-lived Map", + "Scope the cache to the feature lifecycle instead of the whole application" + ], + "correctAnswer": [0, 2, 4], + "explanation": "WeakMap helps with key lifetimes, removing strong references prevents accidental retention, and limiting cache scope reduces long-lived reachability. 
The other choices preserve data longer, not shorter.", + "tags": ["weakmap", "cache-design", "retention"], + "references": ["Memory Management", "Memory Leaks", "Memory Heap & Objects"] + }, + { + "id": "javascript-q15", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A panel is removed from the DOM, but a heap snapshot still shows the entire subtree reachable through a closure stored in a top-level module variable.", + "prompt": "Why can the garbage collector not reclaim the panel yet?", + "options": [ + "The closure is still part of a strong reference path to the panel objects", + "Old-space objects are never collected after promotion", + "DOM nodes are outside JavaScript memory management entirely", + "Minor GC only runs after full page navigation" + ], + "correctAnswer": 0, + "explanation": "If an object is still strongly reachable, it is live from the collector's perspective. Promotion and DOM status do not change that basic rule.", + "tags": ["strong-references", "closures", "heap-snapshots"], + "references": ["Memory Leaks", "Garbage Collection", "Memory Heap & Objects"] + }, + { + "id": "javascript-q16", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A team considers moving a CPU-heavy transformation from the main thread into a Web Worker because typing and scrolling become janky during the transform.", + "prompt": "What is the primary benefit of that move?", + "options": [ + "It prevents the heavy work from blocking the main thread event loop and UI responsiveness", + "It makes the parser skip AST generation for the worker code", + "It guarantees zero transfer cost for all worker messages", + "It upgrades worker callbacks to microtasks with higher priority than UI work" + ], + "correctAnswer": 0, + "explanation": "Workers move computation off the main thread, which helps preserve responsiveness. 
They do not remove parsing, guarantee zero-copy transfer in all cases, or magically change callback priority rules.", + "tags": ["web-workers", "responsiveness", "main-thread"], + "references": ["JavaScript Runtime", "Web APIs & Platform", "Event Loop & Coordination"] + }, + { + "id": "javascript-q17", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A hot analytics function optimizes during a synthetic benchmark but becomes unstable in production traffic.", + "prompt": "Which production behaviors commonly force optimized code to bail out or avoid specialization?", + "options": [ + "The function sometimes receives arrays with holey elements and sometimes packed elements", + "Object arguments arrive with multiple incompatible hidden classes", + "All calls come from one site with the same numeric types", + "A property is deleted from reused objects before the function reads it", + "The function body relies on stable monomorphic property access" + ], + "correctAnswer": [0, 1, 3], + "explanation": "Array representation changes, incompatible shapes, and delete all reduce optimization stability. 
Stable numeric types and monomorphic access are the good cases the optimizer wants.", + "tags": ["jit", "deopt", "arrays", "hidden-classes"], + "references": ["JIT Compilation Pipeline", "V8 Runtime Features", "JavaScript Engine"] + }, + { + "id": "javascript-q18", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A reviewer sees async callbacks scheduled inside a for loop that uses let for the loop index and asks why the values stay correct.", + "prompt": "What semantic property of let makes this pattern safe compared with var?", + "options": [ + "Each iteration gets a distinct lexical binding captured by the callback", + "let variables are always heap-allocated instead of stack-allocated", + "let makes timer callbacks run as microtasks", + "let forces the engine to optimize the loop body" + ], + "correctAnswer": 0, + "explanation": "let creates a new binding per iteration in this context, so each callback closes over a different variable instance.", + "tags": ["let", "closures", "loop-bindings"], + "references": ["Call Stack & Execution", "Memory Heap & Objects"] + }, + { + "id": "javascript-q19", + "type": "single-choice", + "difficulty": "expert", + "scenario": "An SSR process constructs millions of temporary objects during template expansion, yet the collector handles the pattern reasonably well most of the time.", + "prompt": "Which garbage-collection behavior best explains why this workload can still be efficient?", + "options": [ + "Generational collectors are optimized for objects that die young", + "Mark-sweep only runs when the call stack overflows", + "Promoted objects are never revisited", + "WeakMap keys are always stored off-heap" + ], + "correctAnswer": 0, + "explanation": "Generational collection assumes many objects die young, so short-lived allocation bursts are a case it is designed to handle efficiently.", + "tags": ["generational-gc", "allocation", "short-lived-objects"], + "references": ["Garbage Collection", "Memory 
Management"] + }, + { + "id": "javascript-q20", + "type": "multi-select", + "difficulty": "advanced", + "scenario": "You are moving logic from the browser main thread into a dedicated Web Worker and auditing unsupported assumptions.", + "prompt": "Which capabilities are generally unavailable inside a dedicated Web Worker?", + "options": [ + "Direct synchronous access to the page DOM", + "fetch", + "postMessage", + "window.alert", + "document.createElement" + ], + "correctAnswer": [0, 3, 4], + "explanation": "Workers can use fetch and postMessage, but they do not have direct DOM access and do not expose main-window UI functions such as alert.", + "tags": ["workers", "dom", "host-capabilities"], + "references": ["Engine & Runtime Comparison", "Web APIs & Platform", "JavaScript Runtime"] + }, + { + "id": "javascript-q21", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A security rule must warn on every use of optional chaining in a sensitive folder without executing any application code.", + "prompt": "What artifact should the rule analyze to be precise about syntax location and nesting?", + "options": [ + "The AST produced by parsing the source", + "The call stack captured at runtime", + "The garbage collector mark graph", + "The browser task queue trace" + ], + "correctAnswer": 0, + "explanation": "Static syntax-aware rules operate on the AST, which preserves node structure and source positions without needing runtime execution.", + "tags": ["ast", "linters", "static-analysis"], + "references": ["Parser & AST Generation", "Visualization"] + }, + { + "id": "javascript-q22", + "type": "true-false", + "difficulty": "expert", + "scenario": "A factory function returns a closure that still references a large configuration object after the factory itself has already returned.", + "prompt": "True or false: once the creator function returns, the configuration object becomes collectible even if the returned closure still references it.", + "options": 
["True", "False"], + "correctAnswer": 1, + "explanation": "Closures extend the lifetime of values they reference. Returning from the outer function does not free captured objects if an inner function can still reach them.", + "tags": ["closures", "liveness", "memory-lifetime"], + "references": ["Memory Heap & Objects", "Garbage Collection", "Memory Management"] + }, + { + "id": "javascript-q23", + "type": "single-choice", + "difficulty": "expert", + "scenario": "In Node.js, a maintainer observes that process.nextTick can starve I/O when abused and wants a browser-side mental model for the same failure mode.", + "prompt": "What is the closest analogue in browser JavaScript?", + "options": [ + "Scheduling an unbounded chain of microtasks that runs before the next task", + "Using setTimeout with a 16 ms delay", + "Parsing the same script twice during navigation", + "Allocating objects in old space" + ], + "correctAnswer": 0, + "explanation": "Like nextTick abuse, an unbounded microtask chain can monopolize execution before the event loop can move on to other tasks such as input or timers.", + "tags": ["node", "microtasks", "starvation"], + "references": ["Task Queues & Priority", "JavaScript Runtime", "Event Loop & Coordination"] + }, + { + "id": "javascript-q24", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A performance dashboard shows rising memory after a modal is opened and closed 50 times, and the team wants evidence that this is a real leak rather than normal allocation churn.", + "prompt": "Which observations are stronger evidence of an actual leak?", + "options": [ + "Retained size stays elevated across repeated interaction cycles and multiple GC opportunities", + "Heap usage drops near baseline after the modal closes and the page idles", + "Heap snapshots repeatedly show detached modal nodes retained by the same listener path", + "A short allocation spike appears during animation and then settles", + "Listener counts and timer 
registrations increase with each open-close cycle" + ], + "correctAnswer": [0, 2, 4], + "explanation": "Persistent retained size, repeated detached-node retention paths, and steadily growing listeners or timers all indicate objects remain reachable over time. Temporary spikes that settle do not.", + "tags": ["leak-detection", "heap-snapshots", "retained-size"], + "references": ["Memory Leaks", "Garbage Collection", "Visualization"] + }, + { + "id": "javascript-q25", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A hot numerical function becomes noticeably slower after a debug flag enables eval inside the function body.", + "prompt": "What is the best explanation for the slowdown?", + "options": [ + "Dynamic code evaluation makes optimization and scope analysis much harder for the engine", + "eval automatically promotes every local object to old space", + "eval converts all microtasks into macrotasks", + "eval disables parsing and executes text one character at a time" + ], + "correctAnswer": 0, + "explanation": "eval reduces the engine's ability to reason about scopes and optimize aggressively. 
The other options are not how eval affects execution.", + "tags": ["eval", "optimization", "scope-analysis"], + "references": ["JIT Compilation Pipeline", "JavaScript Engine", "Call Stack & Execution"] + }, + { + "id": "javascript-q26", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "The same JavaScript bundle runs in two environments, but one exposes Deno.readTextFile while the other exposes fs.readFile.", + "prompt": "What does this difference primarily illustrate?", + "options": [ + "The runtime provides host APIs beyond the core language engine", + "The parser generates a different AST for identical source", + "Promises are specified differently across engines", + "JavaScript history split the language into incompatible dialects" + ], + "correctAnswer": 0, + "explanation": "The language core is distinct from the host environment. Different runtimes expose different platform APIs around the same language semantics.", + "tags": ["runtime", "host-apis", "cross-platform"], + "references": ["Engine & Runtime Comparison", "JavaScript Runtime", "JavaScript History"] + }, + { + "id": "javascript-q27", + "type": "matching", + "difficulty": "advanced", + "scenario": "You are onboarding a teammate and want a concise mental model of the main runtime pieces involved in JavaScript execution.", + "prompt": "Match each runtime component to its main responsibility.", + "premises": ["Call stack", "Memory heap", "Web APIs or host environment", "Task queues"], + "responses": [ + "Stores scheduled work until the event loop selects it", + "Holds objects, closures, and other dynamically allocated values", + "Executes synchronous frames in last-in, first-out order", + "Exposes timers, networking, and DOM-related capabilities" + ], + "correctAnswer": [2, 1, 3, 0], + "explanation": "The call stack handles synchronous execution, the heap stores dynamic data, host APIs provide environment-specific capabilities, and task queues hold scheduled work.", + "tags": 
["runtime-model", "heap", "stack", "host-apis"], + "references": [ + "JavaScript Runtime", + "Call Stack & Execution", + "Memory Heap & Objects", + "Web APIs & Platform", + "Task Queues & Priority" + ] + }, + { + "id": "javascript-q28", + "type": "single-choice", + "difficulty": "expert", + "scenario": "You need to attach metadata to third-party objects without preventing those objects from being collected and without polluting normal enumeration.", + "prompt": "Which option best fits the requirement?", + "options": [ + "A WeakMap keyed by the third-party objects", + "A global array of key and metadata pairs", + "A Map keyed by JSON stringified versions of the objects", + "A normal enumerable _meta property added to each object" + ], + "correctAnswer": 0, + "explanation": "WeakMap associates metadata without forcing key lifetime and without adding enumerable properties to the object itself.", + "tags": ["weakmap", "metadata", "garbage-collection"], + "references": ["Memory Management", "Memory Leaks", "Memory Heap & Objects"] + }, + { + "id": "javascript-q29", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A team replaces many timers with chained Promise callbacks and then notices input lag under heavy load.", + "prompt": "Which statements explain why the switch can cause jank?", + "options": [ + "A long microtask chain can delay the event loop from reaching rendering and input tasks", + "Promise callbacks always run on a separate OS thread, so they should reduce UI contention", + "Continuously queueing new microtasks from microtasks can starve later tasks", + "Browsers flush the microtask queue at defined checkpoints before taking the next task", + "Using microtasks automatically yields control after every callback" + ], + "correctAnswer": [0, 2, 3], + "explanation": "Microtasks run before the event loop proceeds to the next task, so long or self-perpetuating microtask chains can delay rendering and input processing rather than help it.", + 
"tags": ["microtask-starvation", "promises", "input-lag"], + "references": ["Event Loop & Coordination", "Task Queues & Priority", "Visualization"] + }, + { + "id": "javascript-q30", + "type": "single-choice", + "difficulty": "intermediate", + "scenario": "A page loads several scripts, and one of them depends on the DOM being fully parsed but should not block HTML parsing itself.", + "prompt": "Which loading strategy is safer for that script?", + "options": [ + "Use defer", + "Use async", + "Use an inline classic script in the head", + "Use document.write to inject it" + ], + "correctAnswer": 0, + "explanation": "defer waits until parsing is complete before execution and does not block parsing in the way a classic inline script does. async may execute as soon as it finishes loading, which can be earlier than desired.", + "tags": ["script-loading", "defer", "dom-readiness"], + "references": ["Web APIs & Platform", "JavaScript Runtime", "Introduction"] + }, + { + "id": "javascript-q31", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A teaching visualization shows this sequence after a click: handler start, handler end, promise callback, timeout callback.", + "prompt": "Which explanation best matches that timeline?", + "options": [ + "The Promise reaction ran in the microtask checkpoint before the later timer task", + "The timeout callback skipped the queue because timers outrank Promise reactions", + "The handler and timeout shared one synchronous call stack frame", + "The Promise callback only ran because garbage collection finished first" + ], + "correctAnswer": 0, + "explanation": "After the click task completes, the microtask checkpoint runs Promise reactions before the event loop takes a later timer task, which matches the displayed sequence.", + "tags": ["visualization", "event-loop", "promises", "timers"], + "references": ["Visualization", "Event Loop & Coordination", "Task Queues & Priority"] + } + ] +} diff --git 
a/quiz-banks/nextjs.quiz.json b/quiz-banks/nextjs.quiz.json new file mode 100644 index 0000000..c3ca02d --- /dev/null +++ b/quiz-banks/nextjs.quiz.json @@ -0,0 +1,861 @@ +{ + "moduleId": "nextjs", + "moduleTitle": "Next.js", + "description": "Scenario-based assessment focused on advanced Next.js architecture, routing, rendering strategies, server-first data flows, middleware behavior, and performance optimization tradeoffs.", + "version": 1, + "timeLimitMinutes": 10, + "questionsPerAttempt": 5, + "questions": [ + { + "id": "nextjs-q01", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A SaaS team is migrating a dashboard from the Pages Router. They want the dashboard shell to persist while users move between nested screens, and they want the default path to remain server-first instead of shipping unnecessary client JavaScript.", + "prompt": "Which design best matches that goal?", + "options": [ + "Place shared dashboard UI in app/dashboard/layout.tsx and keep route-specific screens in nested page.tsx files", + "Keep every screen in pages/ and rely on _app.tsx plus client-side data fetching for persistence", + "Mark the entire app/ tree with 'use client' so layout state is always preserved", + "Move the dashboard shell into middleware so it runs before each request" + ], + "correctAnswer": 0, + "explanation": "The App Router's nested layout model is built for persistent shared UI across related routes. 
Keeping the shared shell in a segment layout preserves structure during navigation while still allowing route pages to remain server-first by default.", + "tags": [ + "app-router", + "nested-layouts", + "routing", + "server-first" + ], + "references": [ + "Routing Systems", + "Server & Client Components" + ] + }, + { + "id": "nextjs-q02", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A team is standardizing file conventions in a new App Router codebase and wants to avoid inventing custom patterns that fight the framework.", + "prompt": "Which statements about built-in route segment files are correct?", + "options": [ + "A layout.tsx file can wrap descendant pages and nested segments with shared UI", + "A loading.tsx file can provide an immediate fallback while a segment is waiting on data", + "A page.tsx file represents the leaf UI for a route segment", + "A route.ts file is the preferred place to render visible page markup for browser navigation", + "An _app.tsx file is required to share UI in the App Router" + ], + "correctAnswer": [ + 0, + 1, + 2 + ], + "explanation": "layout.tsx, loading.tsx, and page.tsx are core App Router conventions. route.ts is for HTTP request handling, not visual page UI, and _app.tsx belongs to the legacy Pages Router model rather than the App Router.", + "tags": [ + "file-conventions", + "app-router", + "layouts", + "loading-states" + ], + "references": [ + "Routing Systems" + ] + }, + { + "id": "nextjs-q03", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A public article page is server-rendered and then hydrated on the client. 
Product managers are confused because they see content appear before buttons become interactive.", + "prompt": "True or false: users can see meaningful HTML content before client-side interactivity is attached.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "Server-rendered HTML can be visible as soon as it arrives, while hydration happens later to attach interactivity. That separation is a major reason server rendering improves initial perception and SEO.", + "tags": [ + "hydration", + "ssr", + "initial-render", + "seo" + ], + "references": [ + "Rendering Strategies", + "Server & Client Components" + ] + }, + { + "id": "nextjs-q04", + "type": "ordering", + "difficulty": "expert", + "scenario": "You are explaining the high-level lifecycle of a first request to an App Router page that uses middleware and includes some interactive client islands.", + "prompt": "Arrange the events from earliest to latest.", + "items": [ + "Middleware evaluates the incoming request", + "The server matches route segments and renders the response", + "The browser receives initial content and can display it", + "Client-side JavaScript hydrates the interactive parts" + ], + "correctAnswer": [ + "Middleware evaluates the incoming request", + "The server matches route segments and renders the response", + "The browser receives initial content and can display it", + "Client-side JavaScript hydrates the interactive parts" + ], + "explanation": "Middleware runs before the route logic, then the server resolves segments and renders output. 
The browser can show that output before the client bundle completes hydration for interactive islands.", + "tags": [ + "request-lifecycle", + "middleware", + "rendering", + "hydration" + ], + "references": [ + "Middleware & Route Handlers", + "Rendering Strategies", + "Server & Client Components" + ] + }, + { + "id": "nextjs-q05", + "type": "matching", + "difficulty": "expert", + "scenario": "A platform review compares several page types and wants each one mapped to the rendering strategy that best fits its constraints.", + "prompt": "Match each page scenario to the best rendering strategy.", + "premises": [ + "A product launch page changes only when marketing republishes the site", + "A banking dashboard shows per-user balances that must be fresh on each request", + "A catalog page needs CDN speed but should refresh every few minutes as inventory changes", + "An internal browser-based tool is highly interactive and SEO is irrelevant" + ], + "responses": [ + "Static Site Generation", + "Server-Side Rendering", + "Incremental Static Regeneration", + "Client-Side Rendering" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Stable public content is best suited to SSG, personalized per-request content aligns with SSR, periodically changing but cache-friendly content fits ISR, and SEO-insensitive interactive tools often tolerate CSR.", + "tags": [ + "rendering-strategies", + "ssg", + "ssr", + "isr", + "csr" + ], + "references": [ + "Rendering Strategies" + ] + }, + { + "id": "nextjs-q06", + "type": "single-choice", + "difficulty": "expert", + "scenario": "An article page needs a complex comparison widget with browser events and local state, but the rest of the page is static text fetched on the server. 
Bundle size has become a problem.", + "prompt": "What is the best architectural move?", + "options": [ + "Keep the page server-rendered and isolate the comparison widget behind the smallest possible client boundary", + "Add 'use client' to the entire route so the widget can coexist with the article content", + "Move all article fetching into useEffect so everything lives in one client tree", + "Run the comparison widget in middleware before the page is rendered" + ], + "correctAnswer": 0, + "explanation": "The best pattern is to keep as much of the tree on the server as possible and introduce a client island only where interactivity is required. Marking the entire route as client-rendered increases bundle size and throws away the server-first model.", + "tags": [ + "client-boundaries", + "bundle-size", + "client-islands", + "rsc" + ], + "references": [ + "Server & Client Components", + "Optimization & Performance" + ] + }, + { + "id": "nextjs-q07", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A team is reviewing a mixed component tree and wants to identify which pieces truly require the browser.", + "prompt": "Which components must be Client Components?", + "options": [ + "A color picker that uses useState and onClick handlers", + "A theme toggle that writes to localStorage", + "A product description component that only renders serializable props from the server", + "A chart that reads window resize events to redraw itself", + "A database-backed component that directly accesses server-only credentials" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Stateful event handling, browser APIs like localStorage, and direct window access require client execution. 
Pure presentational server-fed markup can stay on the server, and secret-bearing server-only logic must not move into the client bundle.", + "tags": [ + "use-client", + "browser-apis", + "component-boundaries", + "interactivity" + ], + "references": [ + "Server & Client Components" + ] + }, + { + "id": "nextjs-q08", + "type": "single-choice", + "difficulty": "expert", + "scenario": "Two sibling Server Components call the same fetch URL with the same options during one render of a catalog page. Ops expects two origin hits but only sees one.", + "prompt": "What is the most likely explanation?", + "options": [ + "Next.js memoizes identical fetch requests during the same render pass", + "The browser automatically merges server fetches before the request reaches the network", + "Middleware converts duplicate server fetches into cached route handlers", + "React prevents multiple awaits for the same promise only inside Client Components" + ], + "correctAnswer": 0, + "explanation": "Next.js deduplicates identical fetch calls during a render pass, which reduces repeated work and avoids duplicate origin calls when the URL and options match.", + "tags": [ + "fetch", + "memoization", + "data-fetching", + "server-components" + ], + "references": [ + "Data Fetching & Mutations" + ] + }, + { + "id": "nextjs-q09", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A checkout form must continue working for users behind restrictive corporate networks that block or delay client-side JavaScript. 
The team also wants server-side validation and mutation logic.", + "prompt": "Which pattern is the best fit?", + "options": [ + "Submit an HTML form to a Server Action so the flow progressively enhances when JavaScript is available", + "Handle submission only in a Client Component with fetch inside an onSubmit handler", + "Perform the purchase mutation inside middleware because it runs before every request", + "Render the form statically and ask the browser to store the order in localStorage until JavaScript loads" + ], + "correctAnswer": 0, + "explanation": "Server Actions are designed for server-side mutations and progressive enhancement. A standard form can submit even without client JavaScript, while enhanced behavior is layered on when the browser is capable.", + "tags": [ + "server-actions", + "forms", + "progressive-enhancement", + "mutations" + ], + "references": [ + "Data Fetching & Mutations", + "Server & Client Components" + ] + }, + { + "id": "nextjs-q10", + "type": "true-false", + "difficulty": "advanced", + "scenario": "An authentication team plans to perform multiple database joins in middleware for every request because the code will run at the edge.", + "prompt": "True or false: heavy business logic in middleware is usually a good default because middleware sits early in the pipeline.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "Middleware should stay lightweight because it runs on matched requests and directly affects latency. Expensive business logic is usually better handled deeper in the application where it is needed rather than on every request path.", + "tags": [ + "middleware", + "latency", + "edge-runtime", + "performance" + ], + "references": [ + "Middleware & Route Handlers", + "Optimization & Performance" + ] + }, + { + "id": "nextjs-q11", + "type": "single-choice", + "difficulty": "expert", + "scenario": "An e-commerce catalog contains 100,000 product pages. 
Search visibility matters, pages should be fast globally, and inventory changes every 15 minutes, but the business wants to avoid request-time rendering for every visit.", + "prompt": "Which rendering strategy is the strongest fit?", + "options": [ + "Server-Side Rendering", + "Static Site Generation without regeneration", + "Incremental Static Regeneration", + "Client-Side Rendering" + ], + "correctAnswer": 2, + "explanation": "ISR provides CDN-speed static delivery while allowing content to refresh periodically. It is specifically useful when content changes regularly but not on every single request.", + "tags": [ + "isr", + "catalog", + "seo", + "cdn" + ], + "references": [ + "Rendering Strategies", + "Optimization & Performance" + ] + }, + { + "id": "nextjs-q12", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A public landing page has poor Largest Contentful Paint and excessive initial JavaScript. The team wants improvements without deleting important features.", + "prompt": "Which changes are most likely to help?", + "options": [ + "Use Next.js image optimization with responsive sizing for above-the-fold imagery", + "Dynamically import non-critical interactive widgets so they are not in the initial bundle", + "Promote the entire page to a Client Component to simplify the tree", + "Keep above-the-fold textual content server-rendered or statically generated when possible", + "Send the same oversized hero image to all devices to reduce variant generation" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Responsive image optimization, code splitting for non-critical widgets, and shipping server-rendered primary content all improve initial load behavior. 
Expanding the client boundary or shipping oversized assets usually worsens it.", + "tags": [ + "lcp", + "image-optimization", + "dynamic-import", + "bundle-size" + ], + "references": [ + "Optimization & Performance", + "Rendering Strategies", + "Server & Client Components" + ] + }, + { + "id": "nextjs-q13", + "type": "single-choice", + "difficulty": "expert", + "scenario": "After migrating to the App Router, a team notices that a simple route now ships far more JavaScript than expected. The root layout was marked with 'use client' to support one interactive search box in the header.", + "prompt": "What is the most likely cause of the bundle increase?", + "options": [ + "A client boundary high in the tree pulls a much larger subtree into the client-side graph", + "The App Router always ships more JavaScript than the Pages Router for equivalent pages", + "Middleware duplicates every layout file into the browser bundle", + "Server Actions automatically force all ancestors to run on the client" + ], + "correctAnswer": 0, + "explanation": "Placing 'use client' high in the tree can expand the client-side portion of the application dramatically. 
The better pattern is to isolate the interactive header behavior behind a smaller client boundary rather than converting the root layout.", + "tags": [ + "use-client", + "bundle-analysis", + "layout-boundaries", + "performance" + ], + "references": [ + "Server & Client Components", + "Optimization & Performance" + ] + }, + { + "id": "nextjs-q14", + "type": "ordering", + "difficulty": "expert", + "scenario": "A content editor submits a form backed by a Server Action that writes to the database and refreshes the listing page.", + "prompt": "Order the mutation flow from start to finish.", + "items": [ + "The browser submits the form to the server-side action", + "The server validates input and performs the mutation", + "The affected path or cache entry is revalidated", + "The response returns updated UI or fresh data to the user" + ], + "correctAnswer": [ + "The browser submits the form to the server-side action", + "The server validates input and performs the mutation", + "The affected path or cache entry is revalidated", + "The response returns updated UI or fresh data to the user" + ], + "explanation": "A progressive form submission reaches the Server Action first, then the server performs validation and mutation, triggers revalidation where necessary, and finally returns a refreshed result to the user.", + "tags": [ + "server-actions", + "forms", + "revalidation", + "data-mutations" + ], + "references": [ + "Data Fetching & Mutations" + ] + }, + { + "id": "nextjs-q15", + "type": "matching", + "difficulty": "expert", + "scenario": "A new engineer keeps mixing up App Router file roles during code reviews.", + "prompt": "Match each file convention to its primary responsibility.", + "premises": [ + "layout.tsx inside app/dashboard/", + "loading.tsx inside a route segment", + "error.tsx for a route segment", + "route.ts inside app/api/orders/" + ], + "responses": [ + "Persistent shared UI for descendant routes", + "Immediate fallback while a segment is loading", + 
"Recoverable boundary for segment-level failures", + "HTTP request handling for methods such as GET or POST" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Each file has a specific role in App Router structure. layout.tsx shares UI, loading.tsx provides pending UI, error.tsx handles segment-level failures, and route.ts defines request handlers rather than visible page markup.", + "tags": [ + "app-router", + "file-conventions", + "error-boundaries", + "route-handlers" + ], + "references": [ + "Routing Systems", + "Middleware & Route Handlers" + ] + }, + { + "id": "nextjs-q16", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A developer inspects an RSC payload and is surprised that it contains serializable data and component structure but not click handlers or direct DOM behavior.", + "prompt": "Which explanation is most accurate?", + "options": [ + "Server Components describe what should render and pass serializable data, while interactivity is attached later by Client Components during hydration", + "The payload is incomplete because middleware strips all event handlers before streaming the page", + "React sends event listeners only when the page is using SSR rather than Server Components", + "The browser refuses to hydrate handlers that originate from TypeScript source files" + ], + "correctAnswer": 0, + "explanation": "Server Components are not responsible for browser event handling. 
They produce render instructions and serializable props, and client-side interactivity is introduced by Client Components once the browser hydrates them.", + "tags": [ + "rsc-payload", + "hydration", + "serializable-props", + "server-components" + ], + "references": [ + "Server & Client Components" + ] + }, + { + "id": "nextjs-q17", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A platform team is deciding which cross-cutting concerns belong in middleware rather than in individual pages or deeper API logic.", + "prompt": "Which tasks are strong candidates for middleware?", + "options": [ + "Redirect anonymous users away from protected routes before the page logic runs", + "Attach security-related headers for matched requests", + "Run expensive report aggregation for every incoming request", + "Perform locale-based rewrites or redirects close to the edge", + "Render personalized dashboard JSX directly in the middleware file" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Middleware is well suited to lightweight request interception such as auth gating, rewrites, redirects, and security header manipulation. Heavy business logic and full page rendering do not belong there.", + "tags": [ + "middleware", + "auth", + "headers", + "rewrites" + ], + "references": [ + "Middleware & Route Handlers", + "Routing Systems" + ] + }, + { + "id": "nextjs-q18", + "type": "single-choice", + "difficulty": "expert", + "scenario": "An editor updates a blog post through a Server Action. 
The write succeeds, but readers keep seeing the cached page until the next scheduled refresh window.", + "prompt": "What is the most direct fix?", + "options": [ + "Call revalidatePath for the affected route after the mutation", + "Add 'use client' to the blog page so it ignores the cache", + "Move the blog fetch into useEffect so the browser always refetches", + "Wrap the post body in Suspense without touching the cache strategy" + ], + "correctAnswer": 0, + "explanation": "When a mutation should invalidate cached or statically regenerated content immediately, revalidatePath is the targeted mechanism for refreshing the affected route.", + "tags": [ + "revalidatePath", + "cache-invalidation", + "server-actions", + "content-updates" + ], + "references": [ + "Data Fetching & Mutations" + ] + }, + { + "id": "nextjs-q19", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A company needs a stable JSON endpoint consumed by a mobile app and third-party partners. The web app also uses internal form mutations elsewhere, but this endpoint must expose standard HTTP semantics.", + "prompt": "Which Next.js abstraction should power that public endpoint?", + "options": [ + "A Server Action", + "A Route Handler", + "A Client Component", + "A loading.tsx segment file" + ], + "correctAnswer": 1, + "explanation": "Route Handlers are the correct fit for explicit HTTP endpoints with methods such as GET and POST. Server Actions are primarily designed for app-internal mutation flows rather than general external API contracts.", + "tags": [ + "route-handlers", + "api", + "http", + "integration" + ], + "references": [ + "Middleware & Route Handlers", + "Data Fetching & Mutations" + ] + }, + { + "id": "nextjs-q20", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A teammate writes a component under app/ that does not reference browser APIs, hooks, or 'use client'. 
They assume it still runs in the browser unless configured otherwise.", + "prompt": "True or false: in the App Router, that component is treated as a Server Component by default.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "App Router components are Server Components by default unless a client boundary is introduced. That default is what enables the server-first architecture and smaller client bundles.", + "tags": [ + "defaults", + "server-components", + "app-router", + "use-client" + ], + "references": [ + "Introduction", + "Server & Client Components" + ] + }, + { + "id": "nextjs-q21", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A stock information page was initially built with plain static generation. It now serves stale numbers until the next deployment, but the company still does not want the cost of full request-time rendering for every view.", + "prompt": "What is the best compromise?", + "options": [ + "Switch to Client-Side Rendering for the primary content", + "Use Incremental Static Regeneration with an appropriate revalidation window", + "Keep plain Static Site Generation and trigger a full redeploy after every quote change", + "Run all quote updates through middleware on every request" + ], + "correctAnswer": 1, + "explanation": "ISR preserves static delivery for speed while allowing scheduled freshness.
It is the standard middle ground between permanently static content and rendering everything anew on each request.", + "tags": [ + "isr", + "staleness", + "caching", + "freshness" + ], + "references": [ + "Rendering Strategies", + "Optimization & Performance" + ] + }, + { + "id": "nextjs-q22", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A page loads a client shell, then the parent fetches the user, then a child fetches projects after the user arrives, producing a visible waterfall and poor initial load.", + "prompt": "Which redesign aligns best with Next.js server-first data flow?", + "options": [ + "Move the relevant data fetching into Server Components so requests can start higher in the tree and avoid client-side waterfalls", + "Keep the waterfall but hide it behind a longer skeleton animation", + "Fetch everything sequentially in middleware before routing so the client never sees loading states", + "Convert the entire route into a Client Component to keep all data logic in one place" + ], + "correctAnswer": 0, + "explanation": "Server-side fetching higher in the tree reduces client waterfalls and often allows parallelism. 
Hiding the problem with skeletons or broad client boundaries does not solve the latency root cause.", + "tags": [ + "waterfalls", + "server-fetching", + "data-flow", + "performance" + ], + "references": [ + "Data Fetching & Mutations", + "Server & Client Components", + "Optimization & Performance" + ] + }, + { + "id": "nextjs-q23", + "type": "matching", + "difficulty": "expert", + "scenario": "A performance incident review lists four symptoms and asks the team to map each one to the most relevant optimization technique.", + "prompt": "Match each symptom to the best response.", + "premises": [ + "The largest visible element is an oversized hero image on mobile devices", + "A rarely used admin chart library inflates the initial JavaScript bundle", + "Search crawlers receive a mostly empty shell before JavaScript executes", + "Two sibling Server Components make the same data request during one render" + ], + "responses": [ + "Use Next.js image optimization with responsive sizing and modern formats", + "Split the chart into a dynamically loaded chunk", + "Prefer server-rendered or pre-rendered HTML for the primary content", + "Rely on identical fetch memoization during the render pass" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Each symptom points to a different built-in advantage: next/image for media delivery, dynamic imports for trimming the initial bundle, server rendering for crawler-visible content, and fetch memoization for duplicate request elimination.", + "tags": [ + "performance-audit", + "image-optimization", + "dynamic-import", + "seo", + "memoization" + ], + "references": [ + "Optimization & Performance", + "Data Fetching & Mutations", + "Rendering Strategies" + ] + }, + { + "id": "nextjs-q24", + "type": "ordering", + "difficulty": "expert", + "scenario": "A user navigates from /dashboard to /dashboard/settings in an App Router application that uses a shared dashboard layout and a loading.tsx file for the target segment.", + 
"prompt": "Put the navigation events in the most likely order.", + "items": [ + "The user clicks a client-side link to the sibling segment", + "The shared parent layout remains mounted during the transition", + "The incoming segment can show its loading UI immediately", + "The new server-rendered segment payload arrives", + "The target segment becomes interactive with the updated UI" + ], + "correctAnswer": [ + "The user clicks a client-side link to the sibling segment", + "The shared parent layout remains mounted during the transition", + "The incoming segment can show its loading UI immediately", + "The new server-rendered segment payload arrives", + "The target segment becomes interactive with the updated UI" + ], + "explanation": "App Router navigation can preserve shared layouts, reveal loading UI for the incoming segment, then apply the streamed result and hydrate any interactive pieces needed for the updated view.", + "tags": [ + "navigation", + "nested-layouts", + "loading-ui", + "hydration" + ], + "references": [ + "Routing Systems", + "Rendering Strategies", + "Server & Client Components" + ] + }, + { + "id": "nextjs-q25", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A platform group wants public pages to stay as cache-friendly and deterministic as possible. 
They are reviewing patterns that quietly push routes into more dynamic behavior.", + "prompt": "Which practices support more static or cacheable rendering?", + "options": [ + "Pre-render marketing pages whose content only changes when the site is republished", + "Use a revalidation window for content that changes periodically but not per request", + "Read request cookies in the page render to personalize HTML for every visitor", + "Keep request-time randomness out of rendered output when deterministic HTML is desired", + "Promote every layout to a Client Component before deciding on a rendering strategy" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Predictable shared content and explicit revalidation windows preserve static or cache-friendly behavior. Per-request personalization and unnecessary client promotion make it harder to keep routes deterministic and efficiently cached.", + "tags": [ + "static-rendering", + "cacheability", + "determinism", + "isr" + ], + "references": [ + "Rendering Strategies", + "Data Fetching & Mutations" + ] + }, + { + "id": "nextjs-q26", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A dashboard sidebar contains tabs and local interaction state that should survive navigation between /dashboard/analytics and /dashboard/billing.", + "prompt": "Where should the shared shell live?", + "options": [ + "In dashboard/layout.tsx so sibling pages swap underneath a persistent parent segment", + "In middleware.ts so it is recreated before every matched request", + "Inside each page.tsx file so every route owns its own copy", + "Inside route.ts so the sidebar is rendered by HTTP handlers" + ], + "correctAnswer": 0, + "explanation": "Shared layout UI belongs in the segment layout so it can persist while child routes change. 
Duplicating it per page or trying to move it into middleware or route handlers breaks the App Router model.", + "tags": [ + "persistent-ui", + "layouts", + "navigation", + "state-preservation" + ], + "references": [ + "Routing Systems", + "Server & Client Components" + ] + }, + { + "id": "nextjs-q27", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A middleware file unintentionally runs for font files, images, and static assets, adding measurable latency to pages that do not need request interception.", + "prompt": "What is the most direct correction?", + "options": [ + "Narrow the matcher configuration so middleware runs only on intended paths", + "Move the middleware logic into the root layout so assets bypass it", + "Replace the middleware with a Client Component that checks every request in the browser", + "Convert all assets into Route Handlers so the middleware becomes faster" + ], + "correctAnswer": 0, + "explanation": "Broad matching causes needless middleware execution. Tightening matcher scope is the direct way to keep the edge layer focused on the routes that actually require interception.", + "tags": [ + "matcher", + "middleware", + "latency", + "request-scope" + ], + "references": [ + "Middleware & Route Handlers", + "Optimization & Performance" + ] + }, + { + "id": "nextjs-q28", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A startup wants its public pricing page to load as fast as possible worldwide and be easy for search engines to crawl. The content only changes during infrequent campaign launches.", + "prompt": "Which rendering strategy is the clearest fit?", + "options": [ + "Static Site Generation", + "Server-Side Rendering", + "Client-Side Rendering", + "Per-request middleware composition" + ], + "correctAnswer": 0, + "explanation": "Stable public content with high SEO and global speed requirements is the classic SSG use case. 
Pre-rendering and CDN delivery minimize work per request and maximize crawlability.", + "tags": [ + "ssg", + "marketing-pages", + "seo", + "cdn" + ], + "references": [ + "Introduction", + "Rendering Strategies" + ] + }, + { + "id": "nextjs-q29", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A Client Component tries to import a database helper directly so an autocomplete widget can query production data from the browser. The prototype works locally but fails architectural review.", + "prompt": "What is the core problem?", + "options": [ + "Client Components cannot safely own server-only code or secrets, so the read must move behind a server boundary", + "Database queries are forbidden in the App Router even on the server", + "Only middleware is allowed to communicate with a database in Next.js", + "Hydration fails whenever a component reads remote data after mount" + ], + "correctAnswer": 0, + "explanation": "Client Components run in the browser and therefore cannot safely execute server-only logic or carry private credentials. The correct fix is to keep data access on the server and pass results through a safe boundary.", + "tags": [ + "server-only", + "security", + "client-components", + "data-access" + ], + "references": [ + "Server & Client Components", + "Data Fetching & Mutations" + ] + }, + { + "id": "nextjs-q30", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A long-form article page added an advanced visualization that is useful to only a small fraction of readers. 
The article remains server-rendered, but initial JavaScript cost rose sharply.", + "prompt": "Which change most directly improves first load without removing the visualization entirely?", + "options": [ + "Load the visualization dynamically so it is excluded from the critical initial bundle", + "Mark the entire article page with 'use client' so the bundle becomes more consistent", + "Move the visualization logic into middleware so it runs before the article is served", + "Render the visualization server-side and immediately hydrate the whole page regardless of need" + ], + "correctAnswer": 0, + "explanation": "Dynamic loading is the targeted fix when a rarely used interactive feature bloats the initial bundle. It preserves functionality while keeping that code out of the critical path for most readers.", + "tags": [ + "dynamic-import", + "code-splitting", + "bundle-size", + "article-pages" + ], + "references": [ + "Optimization & Performance", + "Server & Client Components" + ] + }, + { + "id": "nextjs-q31", + "type": "multi-select", + "difficulty": "expert", + "scenario": "An architecture review asks for decisions that improve both SEO and initial load behavior on public content routes without giving up interactive enhancements where they are truly necessary.", + "prompt": "Which decisions best align with that goal?", + "options": [ + "Render stable primary content on the server or at build time instead of relying on client-only fetching", + "Keep non-interactive sections as Server Components whenever possible", + "Split rarely used interactive widgets out of the critical path", + "Fetch the primary article body in useEffect after mount so the HTML stays minimal", + "Apply personalization middleware to every asset request for consistency" + ], + "correctAnswer": [ + 0, + 1, + 2 + ], + "explanation": "Server-rendered or pre-rendered primary content, smaller client boundaries, and code splitting for low-priority widgets all support better crawlability and faster 
initial delivery. Deferring primary content to the client or intercepting every asset request works against those goals.", + "tags": [ + "seo", + "initial-load", + "server-components", + "code-splitting" + ], + "references": [ + "Introduction", + "Rendering Strategies", + "Server & Client Components", + "Optimization & Performance" + ] + } + ] +} \ No newline at end of file diff --git a/quiz-banks/nodejs.quiz.json b/quiz-banks/nodejs.quiz.json new file mode 100644 index 0000000..5c37c99 --- /dev/null +++ b/quiz-banks/nodejs.quiz.json @@ -0,0 +1,864 @@ +{ + "moduleId": "nodejs", + "moduleTitle": "Node.js", + "description": "Scenario-based assessment focused on advanced Node.js runtime behavior, event-loop scheduling, async coordination, streams and backpressure, scaling strategies, memory diagnostics, module-system edge cases, package-manager discipline, framework tradeoffs, and cross-runtime portability.", + "version": 1, + "timeLimitMinutes": 10, + "questionsPerAttempt": 5, + "questions": [ + { + "id": "nodejs-q01", + "type": "single-choice", + "difficulty": "expert", + "scenario": "An API gateway looks asynchronous on paper, but one route computes password hashes with crypto.pbkdf2Sync inside the request handler. 
During traffic spikes, unrelated lightweight requests also suffer 800 ms latency jumps.", + "prompt": "Which root cause best explains why a few expensive requests delay unrelated clients in the same process?", + "options": [ + "Each synchronous hash blocks the event loop thread for the process", + "The hashes are transparently moved to the libuv thread pool because they are crypto operations", + "The kernel pauses all sockets until the slowest request finishes", + "V8 automatically parallelizes synchronous crypto across idle CPU cores" + ], + "correctAnswer": 0, + "explanation": "The synchronous variant runs on the main thread, so it blocks the event loop and delays every other callback in that process until the hash finishes.", + "tags": [ + "event-loop", + "cpu-bound", + "latency", + "crypto" + ], + "references": [ + "Introduction", + "Event Loop", + "Scaling" + ] + }, + { + "id": "nodejs-q02", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A telemetry agent schedules a recursive chain of process.nextTick callbacks while also receiving steady TCP traffic. Once the chain starts, socket handlers stop firing until the recursion ends.", + "prompt": "Which observations accurately explain the stall?", + "options": [ + "Node drains the nextTick queue before re-entering other event loop phases", + "A long self-scheduling nextTick chain can starve poll and check work", + "Promise microtasks always preempt nextTick in Node.js", + "Yielding periodically with setImmediate would let I/O phases make progress", + "Increasing socket highWaterMark would eliminate the starvation" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "process.nextTick has extremely high priority in Node.js. 
If code keeps refilling that queue, the runtime can delay I/O phases until the recursion finally yields.", + "tags": [ + "process-nexttick", + "starvation", + "io", + "scheduling" + ], + "references": [ + "Event Loop", + "Async Programming" + ] + }, + { + "id": "nodejs-q03", + "type": "true-false", + "difficulty": "expert", + "scenario": "A diagnostics library queues both process.nextTick callbacks and Promise.then reactions from the same synchronous function and wants deterministic ordering.", + "prompt": "True or false: in Node.js, a process.nextTick callback queued at the same moment as a Promise.then reaction will run first.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "Node.js processes the nextTick queue before ordinary Promise microtasks, which is why excessive nextTick usage can starve other work.", + "tags": [ + "microtasks", + "process-nexttick", + "promises", + "ordering" + ], + "references": [ + "Event Loop", + "Async Programming" + ] + }, + { + "id": "nodejs-q04", + "type": "ordering", + "difficulty": "expert", + "scenario": "You are walking a new platform engineer through the simplified visible phases of the Node.js event loop after the current script has finished executing.", + "prompt": "Arrange the event loop phases from earliest to latest for a normal Node.js iteration after the current script has finished.", + "items": [ + "Timers", + "Pending callbacks", + "Poll", + "Check", + "Close callbacks" + ], + "correctAnswer": [ + "Timers", + "Pending callbacks", + "Poll", + "Check", + "Close callbacks" + ], + "explanation": "In the simplified view most developers use, timers run first, then pending callbacks, then poll, then check, and finally close callbacks.", + "tags": [ + "event-loop-phases", + "timers", + "poll", + "setimmediate" + ], + "references": [ + "Event Loop" + ] + }, + { + "id": "nodejs-q05", + "type": "matching", + "difficulty": "expert", + "scenario": "A platform team is standardizing concurrency 
choices across several services and wants the least-complicated primitive that still fits each workload.", + "prompt": "Match each workload to the concurrency primitive that best fits it.", + "premises": [ + "A single process already has enough CPU headroom, and the hard part is multiplexing 50,000 mostly idle WebSocket connections", + "Image thumbnail generation is CPU-heavy JavaScript and should run in parallel without full process isolation", + "An untrusted PDF tool must be invoked with separate memory space and independent crash boundaries", + "One stateless HTTP service should use all CPU cores while continuing to accept connections on one port" + ], + "responses": [ + "Keep it on the main event loop in one process", + "Use worker_threads", + "Use child_process", + "Use cluster" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Evented I/O excels at many mostly idle connections, worker_threads fit CPU-bound parallel JavaScript with shared memory options, child processes provide stronger isolation, and cluster scales stateless network handling across cores.", + "tags": [ + "concurrency", + "worker-threads", + "cluster", + "child-process" + ], + "references": [ + "Introduction", + "Scaling", + "Event Loop" + ] + }, + { + "id": "nodejs-q06", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A file-upload proxy currently concatenates incoming chunks in arrays and writes the full body to a slow object store client only after the upload completes. 
Under burst traffic, memory spikes before uploads finish.", + "prompt": "Which redesign most directly applies backpressure instead of buffering the entire transfer in user space?", + "options": [ + "Replace the manual buffering loop with a streamed pipeline that writes as data arrives", + "Convert every incoming chunk to UTF-8 strings before storing it", + "Increase every stream's highWaterMark so the buffer can absorb the burst", + "Use Promise.all to flush the buffered chunks in parallel after the upload ends" + ], + "correctAnswer": 0, + "explanation": "A proper stream pipeline preserves backpressure, so the slow destination throttles the source instead of forcing the application to accumulate the whole payload in memory.", + "tags": [ + "streams", + "backpressure", + "memory-pressure", + "uploads" + ], + "references": [ + "Buffers & Streams", + "Async Programming" + ] + }, + { + "id": "nodejs-q07", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A binary framing parser slices headers and payloads out of a shared Buffer pool to avoid extra copies while decoding packets from a TCP stream.", + "prompt": "Which statements are correct about Buffer behavior in this situation?", + "options": [ + "Buffer.allocUnsafe may expose previously used memory until the code overwrites it", + "Buffer.subarray can share the same underlying memory as the original Buffer", + "Buffer.concat is guaranteed to be zero-copy for all inputs", + "Using readUInt32BE instead of readUInt32LE changes how the same bytes are interpreted", + "Converting arbitrary binary chunks to UTF-8 strings before framing is safe" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "allocUnsafe does not zero memory, subarray creates a view, and endianness changes interpretation. 
Buffer.concat copies into a new allocation, and blindly stringifying binary data can corrupt protocol framing.", + "tags": [ + "buffer", + "binary-protocols", + "endianness", + "zero-copy" + ], + "references": [ + "Buffers & Streams", + "Memory Management" + ] + }, + { + "id": "nodejs-q08", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A response compression chain uses manual data, error, and end listeners across req, zlib, and res. When the transform errors, sockets sometimes stay open and completion callbacks fire inconsistently.", + "prompt": "Which Node.js API best coordinates teardown and error propagation across the stream chain?", + "options": [ + "stream.pipeline", + "Promise.race", + "Readable.from", + "stream.finished" + ], + "correctAnswer": 0, + "explanation": "stream.pipeline is designed to wire streams together with correct error propagation and cleanup across the entire chain. finished observes completion on a stream, but it does not replace pipeline's orchestration role.", + "tags": [ + "pipeline", + "error-handling", + "streams", + "cleanup" + ], + "references": [ + "Buffers & Streams", + "Async Programming" + ] + }, + { + "id": "nodejs-q09", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A developer raises a writable stream's highWaterMark to 16 MB and tells the team that this guarantees memory use can never exceed that threshold per stream.", + "prompt": "True or false: setting a writable stream's highWaterMark to 16 MB guarantees its internal buffer can never grow beyond 16 MB.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "highWaterMark is a backpressure threshold, not an absolute safety ceiling. 
Actual buffering behavior can still exceed that number depending on the stream implementation and surrounding flow.", + "tags": [ + "highwatermark", + "backpressure", + "writable-stream", + "buffering" + ], + "references": [ + "Buffers & Streams" + ] + }, + { + "id": "nodejs-q10", + "type": "ordering", + "difficulty": "expert", + "scenario": "A log ingestion service is using proper backpressure. The sink falls behind for a moment and flow control kicks in.", + "prompt": "Arrange the backpressure-aware sequence from earliest to latest once the sink becomes slower than the source.", + "items": [ + "Readable produces a chunk", + "Transform processes the chunk and forwards it", + "Writable.write returns false", + "Upstream stops pulling or pauses", + "The writable emits drain and flow resumes" + ], + "correctAnswer": [ + "Readable produces a chunk", + "Transform processes the chunk and forwards it", + "Writable.write returns false", + "Upstream stops pulling or pauses", + "The writable emits drain and flow resumes" + ], + "explanation": "Once the writable signals pressure by returning false, upstream components should stop pulling more data until drain indicates the sink can accept new chunks again.", + "tags": [ + "backpressure", + "drain", + "writable", + "transform" + ], + "references": [ + "Buffers & Streams", + "Async Programming" + ] + }, + { + "id": "nodejs-q11", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A recommendation service performs CPU-heavy scoring in pure JavaScript and wants parallel execution plus low-overhead shared memory transfer between workers.", + "prompt": "Which scaling primitive is the best fit when low-overhead CPU parallelism and shared memory transfer matter more than process isolation?", + "options": [ + "worker_threads", + "cluster", + "child_process", + "setImmediate" + ], + "correctAnswer": 0, + "explanation": "worker_threads provide parallel JavaScript execution within one process and support efficient shared 
memory patterns such as SharedArrayBuffer.", + "tags": [ + "worker-threads", + "shared-memory", + "cpu-bound", + "parallelism" + ], + "references": [ + "Scaling", + "Introduction" + ] + }, + { + "id": "nodejs-q12", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A service performs many concurrent fs.readFile operations while also using crypto hashing under load. Tail latency worsens even though the main event loop itself is not obviously blocked all the time.", + "prompt": "Which interventions can legitimately reduce tail latency in this situation?", + "options": [ + "Increase UV_THREADPOOL_SIZE if the hot path is dominated by thread-pool-backed operations", + "Replace pbkdf2Sync with asynchronous pbkdf2 or move hashing into worker_threads", + "Use cluster so multiple processes can exploit multiple CPU cores", + "Wrap each fs.readFile call in process.nextTick to get ahead of the queue", + "Assume worker_threads will make already non-blocking socket I/O faster by themselves" + ], + "correctAnswer": [ + 0, + 1, + 2 + ], + "explanation": "fs and some crypto work use the libuv thread pool, CPU-bound sync work can still block the main thread, and cluster can increase overall parallel capacity. 
process.nextTick does not solve thread-pool contention, and worker_threads do not automatically improve already non-blocking socket I/O.", + "tags": [ + "libuv", + "threadpool", + "throughput", + "worker-threads", + "cluster" + ], + "references": [ + "Scaling", + "Async Programming", + "Event Loop" + ] + }, + { + "id": "nodejs-q13", + "type": "matching", + "difficulty": "expert", + "scenario": "A production memory review shows several different growth patterns, and the team wants to map each symptom to its most likely retention mechanism.", + "prompt": "Match each symptom to the most likely retention mechanism.", + "premises": [ + "RSS grows after introducing a large Buffer cache, while heap snapshots look smaller than expected", + "Heap usage climbs because response objects remain keyed by userId forever", + "Memory leak appears after repeatedly adding listeners to long-lived emitters", + "Detached request context survives because closures still reference it" + ], + "responses": [ + "External memory held outside the ordinary V8 heap", + "Unbounded strong-reference cache", + "Listener accumulation on long-lived emitters", + "Closure retention of request state" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Buffers often contribute external memory, Maps and similar structures can retain objects indefinitely, listener buildup can pin emitters and callbacks, and closures can keep otherwise dead request data alive.", + "tags": [ + "memory-leak", + "buffers", + "eventemitter", + "closures" + ], + "references": [ + "Memory Management", + "Buffers & Streams" + ] + }, + { + "id": "nodejs-q14", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A service caches per-tenant metadata using tenant object instances as keys. 
Tenants are deleted elsewhere, but cached entries never disappear and memory keeps growing.", + "prompt": "Which change best allows cached entries to disappear when a tenant key object is no longer strongly referenced elsewhere?", + "options": [ + "Replace Map with WeakMap for object keys that should not keep entries alive", + "Call Object.freeze on every tenant object before caching it", + "Increase the cache size limit so eviction happens less often", + "Stringify each tenant object into JSON and use that string as the key" + ], + "correctAnswer": 0, + "explanation": "WeakMap entries do not keep object keys alive. When the key becomes unreachable elsewhere, the runtime may reclaim the entry automatically.", + "tags": [ + "weakmap", + "caching", + "garbage-collection", + "object-keys" + ], + "references": [ + "Memory Management" + ] + }, + { + "id": "nodejs-q15", + "type": "multi-select", + "difficulty": "expert", + "scenario": "Operations sees process RSS far above heapUsed and wants to explain why overall memory consumption is still rising even though ordinary JavaScript object graphs look fairly stable.", + "prompt": "Which factors can make process RSS materially exceed V8 heapUsed even when ordinary JavaScript objects look stable?", + "options": [ + "Large Buffer or ArrayBuffer allocations tracked as external memory", + "Native addon or runtime C/C++ allocations", + "Promises that have already settled and been garbage-collected", + "Stacks and runtime overhead from additional worker threads", + "Only package-lock.json growth on disk" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "heapUsed does not account for every byte in the process. 
External memory, native allocations, and extra thread stacks all contribute to RSS outside the ordinary managed heap.", + "tags": [ + "rss", + "heapused", + "external-memory", + "worker-threads" + ], + "references": [ + "Memory Management", + "Buffers & Streams", + "Scaling" + ] + }, + { + "id": "nodejs-q16", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A parser takes a small header view with buf.subarray(0, 8) and later normalizes bytes in that view before logging the full packet.", + "prompt": "True or false: mutating a Buffer returned by subarray can change bytes visible through the original Buffer.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "subarray creates a view over the same underlying memory, so writes through the view are reflected in the original Buffer as well.", + "tags": [ + "buffer", + "subarray", + "shared-memory", + "views" + ], + "references": [ + "Buffers & Streams", + "Memory Management" + ] + }, + { + "id": "nodejs-q17", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A package is configured as type: module, and analytics.js tries to call require('./lib.js') at top level during startup.", + "prompt": "What is the most accurate outcome of this module setup?", + "options": [ + "The file is treated as native ESM, and require is not available unless you explicitly bridge it", + "Node silently rewrites require into dynamic import", + "The file runs as CommonJS because it contains require", + "The code works only if package-lock.json exists" + ], + "correctAnswer": 0, + "explanation": "In native ESM, require is not a built-in top-level binding. 
Code must use import syntax or explicitly create a CommonJS bridge such as createRequire.", + "tags": [ + "esm", + "commonjs", + "createRequire", + "module-resolution" + ], + "references": [ + "Module System" + ] + }, + { + "id": "nodejs-q18", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A library maintainer wants to publish one package that exposes only supported entry points and can serve both import and require consumers without encouraging deep imports into internal build files.", + "prompt": "Which package.json practices help achieve that goal?", + "options": [ + "Define an exports map", + "Use conditional exports such as import and require when dual entry points are needed", + "Treat arbitrary deep paths under dist as stable public API", + "Use explicit .cjs or .mjs boundaries or an appropriate type field where necessary", + "Omit exports so every internal file remains reachable" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "exports lets a package define public entry points precisely, conditional exports support multiple consumers, and clear file or package type boundaries reduce ambiguity between ESM and CommonJS.", + "tags": [ + "exports", + "conditional-exports", + "package-json", + "dual-package" + ], + "references": [ + "Module System", + "Package Managers" + ] + }, + { + "id": "nodejs-q19", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A startup module wants to await remote configuration before exporting initialized clients, and the team does not want to wrap the file in an async IIFE.", + "prompt": "Under which module system does this pattern work natively without wrapping the file in an async IIFE?", + "options": [ + "ESM only", + "CommonJS only", + "Both ESM and CommonJS", + "Neither" + ], + "correctAnswer": 0, + "explanation": "Top-level await is supported in ESM, not in traditional CommonJS modules.", + "tags": [ + "top-level-await", + "esm", + "startup", + "async-init" + ], + 
"references": [ + "Module System", + "Async Programming" + ] + }, + { + "id": "nodejs-q20", + "type": "matching", + "difficulty": "expert", + "scenario": "A migration team inherits packages with conflicting file extensions and package-level module settings and needs to predict how Node.js will interpret each case.", + "prompt": "Match each setup to the runtime interpretation Node.js will apply.", + "premises": [ + "In a package with type: module, analytics.js is executed", + "In a package with type: commonjs, server.mjs is executed", + "In a package with type: module, legacy.cjs is executed", + "A consumer imports a subpath that is absent from the package exports map" + ], + "responses": [ + "Treated as ESM because package type marks .js as a module", + "Treated as ESM because the .mjs extension overrides package type", + "Treated as CommonJS because the .cjs extension overrides package type", + "Resolution fails because that subpath is not exported" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Package type influences plain .js files, while .mjs and .cjs are explicit overrides. 
The exports map can also block unlisted subpaths even if the files exist on disk.", + "tags": [ + "module-resolution", + "mjs", + "cjs", + "exports-map" + ], + "references": [ + "Module System" + ] + }, + { + "id": "nodejs-q21", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A CI pipeline must perform a clean install using the lockfile only, remove any preexisting node_modules state, and fail if package.json and the lockfile disagree.", + "prompt": "Which command best matches that CI requirement?", + "options": [ + "npm ci", + "npm install", + "npm update", + "npm audit fix" + ], + "correctAnswer": 0, + "explanation": "npm ci is designed for deterministic CI installs from the lockfile and will fail when the dependency manifest and lockfile are out of sync.", + "tags": [ + "npm-ci", + "lockfile", + "ci", + "reproducibility" + ], + "references": [ + "Package Managers" + ] + }, + { + "id": "nodejs-q22", + "type": "multi-select", + "difficulty": "expert", + "scenario": "After a malicious transitive package incident, a platform team is tightening npm hygiene for a fleet of Node.js services.", + "prompt": "Which controls materially reduce npm supply-chain risk for this service?", + "options": [ + "Commit and review package-lock.json changes", + "Use exact versions for direct dependencies when policy requires it", + "Run npm ci in CI instead of a floating install", + "Allow install scripts from new transitive packages without review to avoid blocking developers", + "Ignore lockfile diffs as long as package.json did not change" + ], + "correctAnswer": [ + 0, + 1, + 2 + ], + "explanation": "Lockfile review, deterministic CI installs, and disciplined version pinning reduce unexpected dependency drift. 
Ignoring transitive changes or blindly trusting new install scripts increases risk.", + "tags": [ + "supply-chain", + "lockfile", + "version-pinning", + "ci" + ], + "references": [ + "Package Managers" + ] + }, + { + "id": "nodejs-q23", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A plugin library extends a host web framework and must share the host's singleton instance rather than bundling a private copy inside the plugin package.", + "prompt": "Which package.json field communicates that the host framework must be supplied by the consumer instead of bundled privately?", + "options": [ + "peerDependencies", + "optionalDependencies", + "bundledDependencies", + "overrides" + ], + "correctAnswer": 0, + "explanation": "peerDependencies declare compatibility with a host package that the consuming application is expected to provide.", + "tags": [ + "peerdependencies", + "plugins", + "framework-integration", + "package-json" + ], + "references": [ + "Package Managers", + "Frameworks" + ] + }, + { + "id": "nodejs-q24", + "type": "single-choice", + "difficulty": "expert", + "scenario": "An Express 4 application defines app.get('/users', async (req, res) => { throw new Error('boom'); }). In production, the rejection bypasses the central error middleware.", + "prompt": "Which change most directly makes rejected async handlers reach centralized error middleware?", + "options": [ + "Wrap the handler so rejections call next(err), or use a helper that forwards async errors", + "Add an uncaughtException listener and keep the route unchanged", + "Move the route body into process.nextTick so Express notices the throw", + "Replace res.json with res.sendStatus" + ], + "correctAnswer": 0, + "explanation": "In Express 4, rejected async handlers are not automatically forwarded to next. 
The route must explicitly pass errors into the middleware chain or use a compatible wrapper.", + "tags": [ + "express", + "error-handling", + "async-routes", + "middleware" + ], + "references": [ + "Frameworks", + "Async Programming" + ] + }, + { + "id": "nodejs-q25", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A team is evaluating Fastify for a high-throughput JSON API and wants to distinguish its architecture from classic Express.", + "prompt": "Which characteristics align more closely with Fastify than with classic Express?", + "options": [ + "Built-in schema-driven validation and serialization hooks", + "An encapsulation-oriented plugin model", + "A minimal middleware stack centered mainly on mutating req and res objects directly", + "A strong emphasis on low overhead and throughput", + "Dependency injection containers as the default architectural core" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Fastify is known for encapsulated plugins, schema-aware validation and serialization, and strong performance goals. 
The looser req/res middleware style is more associated with Express, while DI-first structure is more associated with NestJS.", + "tags": [ + "fastify", + "express", + "validation", + "performance" + ], + "references": [ + "Frameworks" + ] + }, + { + "id": "nodejs-q26", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A NestJS service stores the current user object inside a singleton provider, and under load one request occasionally sees another user's data.", + "prompt": "Which fix best matches the framework's architecture?", + "options": [ + "Move request-specific state into request-scoped providers or request-bound context and keep singleton services stateless", + "Mark every controller method async so NestJS isolates state automatically", + "Switch to the Fastify adapter because request data leaks disappear there", + "Store the current user in a module-level variable instead of the provider" + ], + "correctAnswer": 0, + "explanation": "Singleton providers should remain stateless across requests. Request-specific data belongs in request-scoped providers, request context, or explicit method parameters.", + "tags": [ + "nestjs", + "dependency-injection", + "request-scope", + "state-leak" + ], + "references": [ + "Frameworks", + "Memory Management" + ] + }, + { + "id": "nodejs-q27", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A team tries to run a Node.js CLI under Deno without changing anything. 
The tool assumes npm-style package management, package.json scripts, and CommonJS require-based loading.", + "prompt": "What is the most likely first incompatibility?", + "options": [ + "Assumptions about npm-style package management and CommonJS resolution will not carry over directly", + "Deno cannot execute TypeScript source files", + "Deno has no file system APIs", + "Deno lacks promises and async I/O" + ], + "correctAnswer": 0, + "explanation": "Cross-runtime differences often surface first in module resolution and package-management assumptions. A Node-oriented tool that expects npm conventions and CommonJS behavior may require adaptation before it runs cleanly elsewhere.", + "tags": [ + "deno", + "commonjs", + "package-management", + "portability" + ], + "references": [ + "Runtime Wars", + "Module System", + "Package Managers" + ] + }, + { + "id": "nodejs-q28", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A benchmark shows Bun starting a local dev server faster than Node.js, and leadership wants to decide whether production services should switch runtimes.", + "prompt": "Which evaluation points are legitimate before changing runtimes?", + "options": [ + "Compatibility with required Node core APIs and native addons", + "Package-install behavior, lockfile conventions, and lifecycle script semantics", + "Benchmarks captured under the team's actual workload profile", + "A hello-world benchmark is enough to infer framework compatibility", + "Observability and debugging tooling can be ignored if startup time looks good" + ], + "correctAnswer": [ + 0, + 1, + 2 + ], + "explanation": "Runtime changes affect more than startup time. 
Production decisions should consider API compatibility, package ecosystem behavior, tooling maturity, and representative workload performance rather than toy benchmarks alone.", + "tags": [ + "bun", + "benchmarking", + "compatibility", + "tooling" + ], + "references": [ + "Runtime Wars", + "Package Managers", + "Frameworks" + ] + }, + { + "id": "nodejs-q29", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A service fans out to three upstream HTTP APIs and enforces a strict global timeout. When one upstream exceeds the SLA, the team wants the underlying request cancelled instead of merely ignored.", + "prompt": "Which approach best enforces timeout-driven cancellation across fetch-based upstream calls?", + "options": [ + "Use AbortController and pass its signal into the outbound requests", + "Race the promises against a timeout without aborting the underlying I/O", + "Set a cancelled boolean in JavaScript and let the request finish in the background", + "Call process.exit when the timeout expires" + ], + "correctAnswer": 0, + "explanation": "AbortController integrates with modern fetch-based APIs so the timeout can cancel the underlying operation rather than just ignoring a still-running request.", + "tags": [ + "abortcontroller", + "fetch", + "timeouts", + "cancellation" + ], + "references": [ + "Async Programming", + "Event Loop" + ] + }, + { + "id": "nodejs-q30", + "type": "true-false", + "difficulty": "expert", + "scenario": "A server fires Promise.all over 500 database calls while the database pool allows only 20 active connections. 
Engineers disagree about whether Promise.all guarantees 500-way real throughput.", + "prompt": "True or false: Promise.all creates logical concurrency, but actual throughput is still capped by downstream resources such as the connection pool.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "Promise.all can schedule many asynchronous operations at once, but external bottlenecks like connection pools, rate limits, and CPU capacity still determine real execution throughput.", + "tags": [ + "promise-all", + "concurrency", + "connection-pool", + "throughput" + ], + "references": [ + "Async Programming", + "Scaling" + ] + }, + { + "id": "nodejs-q31", + "type": "single-choice", + "difficulty": "expert", + "scenario": "An ingestion endpoint accepts 500 MB NDJSON uploads, concatenates the full body into memory, and only then starts parsing and writing records to storage. During bursts, containers are OOM-killed.", + "prompt": "Which redesign most directly lowers memory pressure without giving up streaming throughput?", + "options": [ + "Parse incrementally from the request stream and preserve backpressure all the way to the sink", + "Raise --max-old-space-size until the burst fits in memory", + "Convert each chunk to base64 before concatenating it", + "Throttle parsing with setInterval while keeping the full payload buffered" + ], + "correctAnswer": 0, + "explanation": "Incremental parsing keeps working sets small and lets storage speed regulate the source through backpressure. 
Simply raising heap limits or re-encoding buffered data does not address the architectural problem.", + "tags": [ + "ndjson", + "streaming", + "backpressure", + "oom" + ], + "references": [ + "Buffers & Streams", + "Memory Management", + "Scaling" + ] + } + ] +} \ No newline at end of file diff --git a/quiz-banks/python.quiz.json b/quiz-banks/python.quiz.json new file mode 100644 index 0000000..9a90c05 --- /dev/null +++ b/quiz-banks/python.quiz.json @@ -0,0 +1,827 @@ +{ + "moduleId": "python", + "moduleTitle": "Python", + "description": "Scenario-based assessment focused on advanced Python philosophy, execution semantics, memory management, concurrency limits, and powerful language features such as generators, decorators, context managers, and metaclasses.", + "version": 1, + "timeLimitMinutes": 10, + "questionsPerAttempt": 5, + "questions": [ + { + "id": "python-q01", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A team is migrating a Python 2 service that mixed binary protocol frames with human-readable text, and subtle data corruption appeared after the port to Python 3.", + "prompt": "Which architectural motivation best explains Python 3's decision to keep text and binary data as separate types?", + "options": [ + "To force explicit encoding and decoding boundaries instead of implicit coercion", + "To let the virtual machine skip bytecode generation for string literals", + "To enable multiple inheritance for built-in sequence types", + "To reduce reference-count overhead for immutable objects" + ], + "correctAnswer": 0, + "explanation": "Python 3 separates str and bytes so programs must make text-binary boundaries explicit. 
That design matches Python's preference for clarity over ambiguous implicit conversions.", + "tags": [ + "python3", + "bytes", + "unicode", + "language-design" + ], + "references": [ + "Introduction", + "Python Philosophy" + ] + }, + { + "id": "python-q02", + "type": "multi-select", + "difficulty": "advanced", + "scenario": "An internal platform team is designing a new configuration API and wants it to feel Pythonic without becoming magical.", + "prompt": "Which proposals align most closely with the Zen of Python?", + "options": [ + "Require callers to pass an explicit timezone object instead of guessing from server locale", + "Silently ignore malformed configuration keys to keep startup smooth", + "Expose one clear constructor for common cases and reserve advanced hooks for edge cases", + "Hide network retries behind dynamic attribute access so call sites look shorter", + "Use namespaces for plugin identifiers to avoid collisions" + ], + "correctAnswer": [ + 0, + 2, + 4 + ], + "explanation": "Explicit inputs, one obvious path, and strong use of namespaces reflect core Zen principles. Silent failure and surprising magic push in the opposite direction.", + "tags": [ + "zen-of-python", + "api-design", + "explicitness", + "namespaces" + ], + "references": [ + "Python Philosophy" + ] + }, + { + "id": "python-q03", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A new engineer claims Python skips compilation entirely because it is described as an interpreted language.", + "prompt": "True or false: in CPython, module source is compiled to bytecode before the Python Virtual Machine executes it.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "CPython does not execute source text directly. 
It compiles source into bytecode and then runs that bytecode inside the Python Virtual Machine.", + "tags": [ + "cpython", + "bytecode", + "pvm", + "execution" + ], + "references": [ + "Introduction", + "Execution Model" + ] + }, + { + "id": "python-q04", + "type": "ordering", + "difficulty": "expert", + "scenario": "A build-system team wants to explain what happens from module source to top-level execution when a program starts.", + "prompt": "Arrange the stages from earliest to latest.", + "items": [ + "Python reads source text and performs lexical analysis and parsing", + "CPython produces code objects and bytecode", + "A frame is created for module execution", + "The Python Virtual Machine begins executing bytecode instructions", + "Top-level names become bound in the module namespace as statements run" + ], + "correctAnswer": [ + "Python reads source text and performs lexical analysis and parsing", + "CPython produces code objects and bytecode", + "A frame is created for module execution", + "The Python Virtual Machine begins executing bytecode instructions", + "Top-level names become bound in the module namespace as statements run" + ], + "explanation": "Parsing and compilation come before execution. 
Once the module frame exists, the PVM runs bytecode and top-level names are bound as the statements execute.", + "tags": [ + "execution-pipeline", + "module-loading", + "frames", + "bytecode" + ], + "references": [ + "Execution Model" + ] + }, + { + "id": "python-q05", + "type": "matching", + "difficulty": "expert", + "scenario": "A profiler report shows several different lifetime patterns for objects, and the team wants to map each symptom to the correct memory-management mechanism.", + "prompt": "Match each situation to the best explanation.", + "premises": [ + "A temporary list created inside a helper becomes unreachable when the helper returns", + "Two container objects reference each other after all external names are deleted", + "A cache entry should disappear automatically when its key object has no other strong references", + "A long-lived object survives many collections and is scanned less aggressively than young objects" + ], + "responses": [ + "Reference count hits zero and memory can be reclaimed immediately", + "Cyclic garbage collection is needed to detect an unreachable reference cycle", + "Use a weak-reference-based association so the key does not stay alive", + "Generational collection treats older surviving objects differently from newer ones" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Reference counting handles ordinary short-lived objects, cyclic GC handles unreachable cycles, weak references avoid keeping keys alive, and generational GC treats older survivors differently from young objects.", + "tags": [ + "reference-counting", + "cyclic-gc", + "weakref", + "generational-gc" + ], + "references": [ + "Memory Management" + ] + }, + { + "id": "python-q06", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A nested function originally read an outer variable successfully, but after a later edit added an assignment to the same name inside the nested function, the code started failing with 
UnboundLocalError.", + "prompt": "Why does CPython treat that name as local inside the nested function?", + "options": [ + "The compiler determines scope from assignments in the function body before execution", + "The GIL converts free variables into locals to avoid races", + "Reference counting duplicates outer objects into each child frame", + "The bytecode loader resolves names only after the first function call" + ], + "correctAnswer": 0, + "explanation": "Scope is a compile-time decision in CPython. If a function assigns to a name, that name is treated as local unless declared otherwise with global or nonlocal.", + "tags": [ + "scope", + "closures", + "unboundlocalerror", + "compiler" + ], + "references": [ + "Execution Model", + "Advanced Concepts" + ] + }, + { + "id": "python-q07", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A serverless deployment team wants to understand what benefits they get from shipping warm bytecode caches along with unchanged source.", + "prompt": "Which statements about .pyc bytecode caches are correct in CPython?", + "options": [ + ".pyc files cache compiled bytecode so unchanged source can skip recompilation work", + ".pyc files contain native machine code specialized for the deployment CPU", + "Changing the source invalidates reuse of the previous bytecode cache", + "Deleting .pyc files makes Python unable to import the module at all", + "Bytecode caches can reduce startup work but do not remove the PVM execution step" + ], + "correctAnswer": [ + 0, + 2, + 4 + ], + "explanation": ".pyc files cache bytecode, not native code. 
They can reduce import-time compilation overhead, but Python still executes bytecode in the VM, and missing caches do not prevent import.", + "tags": [ + "pyc", + "imports", + "startup", + "bytecode-cache" + ], + "references": [ + "Execution Model" + ] + }, + { + "id": "python-q08", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A generator-based ETL pipeline emits one batch, pauses, and then resumes later without recomputing earlier state.", + "prompt": "What is preserved so the generator can continue from the next yield point?", + "options": [ + "The generator frame, including instruction position and local bindings", + "A cloned OS thread blocked in kernel space", + "A copy of the module's full heap segment", + "A rewritten AST with executed branches removed" + ], + "correctAnswer": 0, + "explanation": "A generator keeps its execution state in a suspended frame. That includes the current instruction position and local variables needed to resume.", + "tags": [ + "generators", + "frames", + "yield", + "lazy-evaluation" + ], + "references": [ + "Execution Model", + "Advanced Concepts" + ] + }, + { + "id": "python-q09", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A threaded downloader performs better than a single-threaded version even though it runs on CPython.", + "prompt": "True or false: this can happen because CPython may release the GIL while threads wait on blocking I/O.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "The GIL prevents multiple threads from executing Python bytecode at the same instant, but blocking I/O often releases the GIL so another thread can make progress.", + "tags": [ + "gil", + "threads", + "io-bound", + "concurrency" + ], + "references": [ + "Global Interpreter Lock" + ] + }, + { + "id": "python-q10", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A transaction manager wraps database work in a with block and must guarantee cleanup 
even if business logic raises an exception halfway through.", + "prompt": "Which guarantee does the context manager protocol provide?", + "options": [ + "__exit__ is invoked during block teardown, allowing cleanup or rollback", + "The block is retried automatically until it succeeds", + "All nested frames are discarded and exceptions are always swallowed", + "Reference counts inside the block are frozen until commit" + ], + "correctAnswer": 0, + "explanation": "The with statement ensures the context manager's exit logic runs during teardown. That is what makes reliable cleanup and rollback patterns possible.", + "tags": [ + "context-managers", + "with-statement", + "cleanup", + "exceptions" + ], + "references": [ + "Advanced Concepts" + ] + }, + { + "id": "python-q11", + "type": "ordering", + "difficulty": "expert", + "scenario": "A payment service stacks several decorators on a handler, and the team wants to reason about what happens when the function definition itself executes.", + "prompt": "Arrange these events from earliest to latest when Python executes a decorated function definition.", + "items": [ + "Decorator expressions are evaluated in the surrounding scope", + "A function object is created from the code object", + "The decorators are applied from the bottommost decorator upward", + "The final wrapped callable is bound to the function name in the namespace" + ], + "correctAnswer": [ + "Decorator expressions are evaluated in the surrounding scope", + "A function object is created from the code object", + "The decorators are applied from the bottommost decorator upward", + "The final wrapped callable is bound to the function name in the namespace" + ], + "explanation": "Decorator expressions are evaluated first, then Python creates the function object, applies decorators bottom-up, and binds the resulting callable to the name.", + "tags": [ + "decorators", + "function-definition", + "name-binding", + "execution-order" + ], + "references": [ + "Execution 
Model", + "Advanced Concepts" + ] + }, + { + "id": "python-q12", + "type": "matching", + "difficulty": "expert", + "scenario": "An architecture review compares several advanced Python features and wants to connect each one to the requirement it solves most directly.", + "prompt": "Match each requirement to the Python feature that best fits it.", + "premises": [ + "Guarantee setup and teardown around a block of resource-using code", + "Compute values lazily while preserving suspension state between iterations", + "Intercept attribute access logic for a managed class attribute", + "Customize how a class object itself is constructed" + ], + "responses": [ + "Context manager", + "Generator", + "Descriptor", + "Metaclass" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Context managers structure setup and teardown, generators preserve suspended execution, descriptors participate in attribute access, and metaclasses customize class creation.", + "tags": [ + "context-managers", + "generators", + "descriptors", + "metaclasses" + ], + "references": [ + "Advanced Concepts" + ] + }, + { + "id": "python-q13", + "type": "multi-select", + "difficulty": "expert", + "scenario": "An analytics platform mixes CPU-heavy transformations with network polling and runs under CPython on a multi-core machine.", + "prompt": "Which decisions are likely to improve overall throughput for this mixed workload?", + "options": [ + "Move CPU-bound work to separate processes", + "Keep adding pure Python threads that perform the same CPU-bound loop", + "Use a C-backed or native extension path that can release the GIL during heavy work", + "Handle blocking network waits with threads or asyncio while isolating CPU-heavy work", + "Assume the GIL lets all Python bytecode execute on all cores simultaneously" + ], + "correctAnswer": [ + 0, + 2, + 3 + ], + "explanation": "CPU-heavy pure Python threads do not scale across cores under the GIL. 
Separate processes, native code that can release the GIL, and sensible handling of I/O concurrency are the effective options.", + "tags": [ + "gil", + "multiprocessing", + "asyncio", + "performance" + ], + "references": [ + "Global Interpreter Lock", + "Advanced Concepts" + ] + }, + { + "id": "python-q14", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A plugin host stores metadata about plugin objects in a cache, but unloaded plugins remain alive because the cache still references them.", + "prompt": "Which change most directly avoids keeping entries alive solely because the cache still holds their keys?", + "options": [ + "Use weakref.WeakKeyDictionary for key-based metadata", + "Replace the dictionary with a tuple of key-value pairs", + "Increase GC thresholds so collection runs less often", + "Store plugin IDs in a global list for faster lookup" + ], + "correctAnswer": 0, + "explanation": "Weak-key storage lets entries disappear when the key object has no other strong references. The other options preserve or worsen retention.", + "tags": [ + "weakref", + "caching", + "memory-leak", + "object-lifetime" + ], + "references": [ + "Memory Management", + "Advanced Concepts" + ] + }, + { + "id": "python-q15", + "type": "true-false", + "difficulty": "expert", + "scenario": "A request handler uses a default list argument for memoized state and unexpectedly accumulates data across unrelated requests.", + "prompt": "True or false: the default list object is created once when the function is defined, not each time the function is called.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "Default argument expressions are evaluated at function definition time. 
That is why a mutable default can persist state across later calls.", + "tags": [ + "default-arguments", + "function-definition", + "mutable-state", + "pitfalls" + ], + "references": [ + "Execution Model", + "Advanced Concepts" + ] + }, + { + "id": "python-q16", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A teammate calls an async function and expects it to start executing immediately, but nothing seems to happen until the result is awaited later.", + "prompt": "What best explains the behavior of an async def function call in Python?", + "options": [ + "Calling it creates a coroutine object that must be awaited or scheduled to execute", + "Calling it immediately starts a new OS thread", + "Calling it recompiles the function body into a fresh .pyc file", + "Calling it bypasses the event loop and runs synchronously until completion" + ], + "correctAnswer": 0, + "explanation": "An async def call returns a coroutine object. Execution does not progress until the coroutine is awaited or otherwise scheduled by an event loop.", + "tags": [ + "asyncio", + "coroutines", + "event-loop", + "async-def" + ], + "references": [ + "Advanced Concepts", + "Execution Model" + ] + }, + { + "id": "python-q17", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A log-processing pipeline must handle terabytes of records without exhausting memory while still composing multiple transformation stages.", + "prompt": "Which properties make generators a better fit than eagerly built lists in this situation?", + "options": [ + "They yield values incrementally instead of materializing the whole result at once", + "They preserve suspension state between yields", + "They guarantee parallel execution across CPU cores", + "They can be chained into pipelines that keep working-set size small", + "They provide constant-time random access to any produced item" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Generators are valuable here because they 
are lazy, preserve state, and can be composed into streaming pipelines. They do not provide random access or automatic parallelism.", + "tags": [ + "generators", + "streaming", + "memory-efficiency", + "pipelines" + ], + "references": [ + "Advanced Concepts", + "Memory Management" + ] + }, + { + "id": "python-q18", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A function prints a global config object, but after adding config = config.merge(local) later in the function body, it now fails before reaching the assignment line.", + "prompt": "Which rule explains the failure?", + "options": [ + "Any assignment in the function body makes the name local unless declared global or nonlocal", + "The interpreter evaluates print arguments only after reaching the assignment statement", + "The module namespace is discarded when entering a function", + "The GIL forbids reading globals before local initialization" + ], + "correctAnswer": 0, + "explanation": "CPython determines scope at compile time. Once the function assigns to config, the name is treated as local throughout that function unless explicitly declared otherwise.", + "tags": [ + "scope-rules", + "global", + "nonlocal", + "name-resolution" + ], + "references": [ + "Execution Model" + ] + }, + { + "id": "python-q19", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A backend team debates whether Python variables are typed containers or labels bound to objects.", + "prompt": "True or false: in Python, names are bindings to objects, and the object's type belongs to the object rather than to the name.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "Python names are labels bound to objects. 
The object carries its type and behavior; the name is simply a reference in a namespace.", + "tags": [ + "object-model", + "bindings", + "dynamic-typing", + "namespaces" + ], + "references": [ + "Introduction", + "Execution Model" + ] + }, + { + "id": "python-q20", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A numerical library backed by native code achieves better parallel throughput than an equivalent pure-Python threaded implementation of the same algorithm.", + "prompt": "Which explanation is most accurate?", + "options": [ + "The library can perform heavy work in native code and may release the GIL around that work", + "Native code automatically disables reference counting for the full interpreter process", + "Importing a C extension makes CPython skip bytecode generation globally", + "Module namespaces are shared across processes instead of threads" + ], + "correctAnswer": 0, + "explanation": "Native extensions can move expensive work outside ordinary Python bytecode execution and may release the GIL while doing that work, enabling better parallel utilization.", + "tags": [ + "gil", + "c-extensions", + "parallelism", + "performance" + ], + "references": [ + "Global Interpreter Lock", + "Advanced Concepts" + ] + }, + { + "id": "python-q21", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A custom context manager needs to log failures and optionally suppress one specific transient exception after cleanup runs.", + "prompt": "Which statements about the context manager protocol are correct?", + "options": [ + "__enter__ runs before the block body", + "__exit__ receives exception details if the block raises", + "Returning True from __exit__ can suppress the exception", + "Using with creates a new interpreter process isolated from the caller", + "Context managers are only for files and cannot model logical transactions" + ], + "correctAnswer": [ + 0, + 1, + 2 + ], + "explanation": "The protocol brackets a block with enter 
and exit hooks, passes exception details to exit, and permits suppression by returning a truthy value. It is general-purpose, not file-specific.", + "tags": [ + "context-managers", + "exception-handling", + "resource-management", + "protocols" + ], + "references": [ + "Advanced Concepts" + ] + }, + { + "id": "python-q22", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "An API endpoint transforms a very large sequence and then iterates over the result exactly once, but list comprehension spikes peak memory.", + "prompt": "Which refactor best reduces peak memory while preserving one-pass iteration semantics?", + "options": [ + "Replace the list comprehension with a generator expression consumed lazily", + "Move the transformation into a class body so names live longer", + "Wrap the result in tuple() before iterating", + "Cache the full list in a default argument" + ], + "correctAnswer": 0, + "explanation": "A generator expression delays production until iteration happens, which reduces peak memory for one-pass consumption. Materializing a tuple or list defeats that goal.", + "tags": [ + "generator-expressions", + "memory-usage", + "lazy-evaluation", + "iteration" + ], + "references": [ + "Advanced Concepts", + "Memory Management" + ] + }, + { + "id": "python-q23", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A framework must enforce that every model class registers a schema attribute at the moment the class itself is created.", + "prompt": "Which mechanism is the most direct fit for altering class creation behavior itself?", + "options": [ + "A metaclass", + "A generator", + "A context manager", + "A weak reference proxy" + ], + "correctAnswer": 0, + "explanation": "Metaclasses customize how class objects are constructed. 
That makes them the direct mechanism for enforcing rules during class creation.", + "tags": [ + "metaclasses", + "class-creation", + "frameworks", + "advanced-python" + ], + "references": [ + "Advanced Concepts" + ] + }, + { + "id": "python-q24", + "type": "true-false", + "difficulty": "expert", + "scenario": "Two ORM entities point to each other, and all external names to both objects have been dropped.", + "prompt": "True or false: reference counting alone is sufficient to reclaim both objects in this case.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "If two objects still reference each other, their reference counts may remain above zero even though the cycle is unreachable. Cyclic garbage collection is needed for that case.", + "tags": [ + "reference-counting", + "cycles", + "garbage-collection", + "orm" + ], + "references": [ + "Memory Management" + ] + }, + { + "id": "python-q25", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A debugger inspects a live CPython frame while stepping through deeply nested calls to understand why a name resolves the way it does.", + "prompt": "Which pieces of execution state are associated with a frame object or its surrounding call-stack machinery?", + "options": [ + "Local variable bindings for that execution context", + "A link to the previous frame or caller context", + "The current instruction position within the code object", + "A dedicated OS process created for each frame", + "References used for global and builtins name lookup" + ], + "correctAnswer": [ + 0, + 1, + 2, + 4 + ], + "explanation": "Frames capture the execution context: locals, instruction position, links to other frames, and namespace references needed for lookups. 
They are not separate OS processes.", + "tags": [ + "frames", + "call-stack", + "debugging", + "name-resolution" + ], + "references": [ + "Execution Model" + ] + }, + { + "id": "python-q26", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A library author wants obj.field access to trigger validation logic without changing any call sites that read or write the attribute.", + "prompt": "Which Python feature is designed to participate directly in attribute access on classes and instances?", + "options": [ + "Descriptor protocol", + "Bytecode cache", + "Reference-cycle detector", + "Import-path hook" + ], + "correctAnswer": 0, + "explanation": "Descriptors are objects that define methods such as __get__ and __set__, allowing them to participate directly in attribute access semantics.", + "tags": [ + "descriptors", + "attribute-access", + "properties", + "protocols" + ], + "references": [ + "Advanced Concepts" + ] + }, + { + "id": "python-q27", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A large application imports the same module from several places, but its expensive top-level initialization normally runs only once per process.", + "prompt": "Why is that initialization code usually executed only once per interpreter session?", + "options": [ + "The imported module object is cached in sys.modules after the first successful import", + "The GIL forbids top-level code from executing twice", + "A .pyc file stores fully initialized global variables as live objects", + "Reference counting pins imported modules to a single CPU core" + ], + "correctAnswer": 0, + "explanation": "Python caches imported module objects in sys.modules. 
Later imports typically reuse that cached module instead of rerunning its top-level code.", + "tags": [ + "imports", + "sys-modules", + "module-cache", + "startup" + ], + "references": [ + "Execution Model" + ] + }, + { + "id": "python-q28", + "type": "single-choice", + "difficulty": "expert", + "scenario": "An asyncio web service directly calls a pure-Python image resize inside an awaited request handler, and all other requests stall until it finishes.", + "prompt": "What is the most accurate diagnosis?", + "options": [ + "The CPU-bound function blocks the event-loop thread, so other coroutines cannot make progress", + "The event loop duplicates the coroutine across worker processes and waits for consensus", + "Await automatically releases the GIL even for synchronous CPU work", + "The stalled requests are caused by .pyc cache invalidation" + ], + "correctAnswer": 0, + "explanation": "Asyncio relies on cooperative scheduling. A synchronous CPU-heavy function blocks the event loop thread, preventing other coroutines from advancing until it returns.", + "tags": [ + "asyncio", + "event-loop", + "cpu-bound", + "latency" + ], + "references": [ + "Advanced Concepts", + "Global Interpreter Lock" + ] + }, + { + "id": "python-q29", + "type": "true-false", + "difficulty": "expert", + "scenario": "A team discusses a hypothetical no-GIL runtime and assumes all thread-safety concerns would disappear if the global lock were removed.", + "prompt": "True or false: removing the GIL would eliminate the need for synchronization around shared mutable state.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "The GIL is not a substitute for all synchronization. 
Shared mutable state can still require explicit coordination to avoid races and preserve invariants.", + "tags": [ + "gil", + "thread-safety", + "shared-state", + "synchronization" + ], + "references": [ + "Global Interpreter Lock" + ] + }, + { + "id": "python-q30", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "An API review rejects a helper that guesses a caller's desired timezone from several ambiguous signals instead of requiring a direct argument.", + "prompt": "Which Zen principle most directly argues against that design?", + "options": [ + "In the face of ambiguity, refuse the temptation to guess", + "Sparse is better than dense", + "Now is better than never", + "Flat is better than nested" + ], + "correctAnswer": 0, + "explanation": "The Zen explicitly warns against guessing in ambiguous situations. Requiring explicit intent is more Pythonic than hiding uncertainty behind guesswork.", + "tags": [ + "zen-of-python", + "ambiguity", + "api-design", + "explicitness" + ], + "references": [ + "Python Philosophy" + ] + }, + { + "id": "python-q31", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A long-running data service shows steady memory growth even after each batch is reported as finished, and the team is trying to distinguish a retention bug from normal load.", + "prompt": "Which observations would most strongly suggest the growth is caused by lingering Python references rather than by active workload alone?", + "options": [ + "Objects expected to die remain reachable from global caches or closures", + "Weak-reference-backed caches release entries when external owners disappear", + "Generation counts keep rising while logically finished objects still appear in heap snapshots", + "Peak memory drops promptly when batch-scoped containers go out of scope", + "A debug list of processed records is appended to forever" + ], + "correctAnswer": [ + 0, + 2, + 4 + ], + "explanation": "Reachability from globals or closures, 
persistent dead-looking objects in snapshots, and intentionally unbounded debug retention all indicate references are keeping data alive beyond its useful lifetime.", + "tags": [ + "memory-leak", + "reachability", + "heap-analysis", + "retention" + ], + "references": [ + "Memory Management", + "Advanced Concepts" + ] + } + ] +} \ No newline at end of file diff --git a/quiz-banks/react.quiz.json b/quiz-banks/react.quiz.json new file mode 100644 index 0000000..d54f6d7 --- /dev/null +++ b/quiz-banks/react.quiz.json @@ -0,0 +1,844 @@ +{ + "moduleId": "react", + "moduleTitle": "React", + "description": "Scenario-based assessment covering advanced React concepts including JSX, component architecture, state and props, hooks, DOM interaction, virtual DOM behavior, and reconciliation strategies.", + "version": 1, + "timeLimitMinutes": 10, + "questionsPerAttempt": 5, + "questions": [ + { + "id": "react-q01", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A product page renders a heavy price calculator component. The parent rerenders every time analytics state updates, even when the calculator inputs are unchanged. The calculator receives a config object literal and an inline callback as props.", + "prompt": "What change is most likely to reduce unnecessary rerenders of the calculator without changing its behavior?", + "options": [ + "Wrap the calculator in React.memo and stabilize the object and callback props", + "Move the calculator lower in the JSX tree without changing props", + "Replace all props with a single JSON string", + "Force the calculator to update only inside useEffect" + ], + "correctAnswer": 0, + "explanation": "React.memo only helps when prop identities remain stable. If the parent recreates object literals and callbacks on every render, the memoized child still rerenders. 
Stabilizing those props addresses the root cause.", + "tags": [ + "components", + "performance", + "props", + "memoization" + ], + "references": [ + "Components", + "State & Props", + "Reconciliation" + ] + }, + { + "id": "react-q02", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A dashboard component fetches user metrics in a useEffect. In development, the team sees duplicate network requests after enabling Strict Mode, and they are trying to separate real bugs from expected behavior.", + "prompt": "Which explanations are valid for why a fetch effect may appear to run twice in development?", + "options": [ + "Strict Mode may intentionally mount, unmount, and remount components to surface unsafe side effects", + "An unstable dependency value can cause the effect to rerun", + "useEffect always runs twice in production for parity with development", + "A state update inside the effect can trigger another render and another effect run when dependencies change", + "React duplicates requests automatically to warm the browser cache" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Development Strict Mode can intentionally replay effects. Separately, unstable dependencies or state changes that affect dependencies can rerun the effect. 
React does not double-run effects in production by default and does not issue duplicate requests for cache warming.", + "tags": [ + "hooks", + "effects", + "strict-mode", + "debugging" + ], + "references": [ + "Hooks", + "Reconciliation" + ] + }, + { + "id": "react-q03", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A teammate stores a derived fullName string in state and updates it in useEffect whenever firstName or lastName changes.", + "prompt": "True or false: when a value can be derived directly from current props or state during render, storing it in separate state is usually unnecessary.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "Derived values that can be computed during render generally should not be duplicated in state. Duplicating them creates synchronization risk and extra updates.", + "tags": [ + "state", + "derived-state", + "rendering" + ], + "references": [ + "State & Props", + "Hooks" + ] + }, + { + "id": "react-q04", + "type": "ordering", + "difficulty": "expert", + "scenario": "A team is analyzing what happens after a button click updates component state in a standard client-rendered React app.", + "prompt": "Order the high-level steps from the user interaction to the DOM update.", + "items": [ + "React schedules and processes the state update", + "The event handler calls the state setter", + "React compares the new element tree with the previous one", + "React commits the necessary DOM mutations" + ], + "correctAnswer": [ + "The event handler calls the state setter", + "React schedules and processes the state update", + "React compares the new element tree with the previous one", + "React commits the necessary DOM mutations" + ], + "explanation": "The click handler triggers a state update, React processes the update and renders a new tree, reconciliation compares it to the previous tree, and the commit phase applies the minimal DOM changes.", + "tags": [ + "virtual-dom", + 
"reconciliation", + "state", + "render-cycle" + ], + "references": [ + "Virtual DOM", + "Reconciliation", + "DOM Fundamentals" + ] + }, + { + "id": "react-q05", + "type": "matching", + "difficulty": "expert", + "scenario": "You are mentoring a team that confuses React terms during performance investigations.", + "prompt": "Match each premise to the most appropriate response.", + "premises": [ + "A component function is called again", + "React updates the browser DOM", + "React compares previous and next UI descriptions", + "State updates are queued before the UI changes appear" + ], + "responses": [ + "Render phase", + "Commit phase", + "Reconciliation", + "Scheduling and batching" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Calling the component is part of rendering, actual browser DOM mutations happen in the commit phase, comparison is reconciliation, and queued updates are part of scheduling and batching behavior.", + "tags": [ + "rendering", + "virtual-dom", + "reconciliation", + "terminology" + ], + "references": [ + "Virtual DOM", + "Reconciliation", + "DOM Fundamentals" + ] + }, + { + "id": "react-q06", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A list of editable invoice rows uses array indexes as keys. When the finance team inserts a new row near the top, input focus and local row state jump to the wrong row.", + "prompt": "What is the best explanation for this bug?", + "options": [ + "Index keys can cause React to associate existing component instances with the wrong items after reordering", + "React does not support editable inputs inside lists", + "Keys only matter for server-rendered lists", + "Using indexes forces React to fully reload the page" + ], + "correctAnswer": 0, + "explanation": "Keys help React preserve identity across renders. 
Index keys break that identity when items are inserted, removed, or reordered, which can move local state and focus to the wrong element.", + "tags": [ + "lists", + "keys", + "reconciliation", + "state-preservation" + ], + "references": [ + "Components", + "Reconciliation" + ] + }, + { + "id": "react-q07", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A search page passes state through six intermediate components before it reaches the filter panel and results summary. The team wants to reduce prop drilling without turning every value into global state.", + "prompt": "Which approaches are reasonable depending on the actual sharing needs?", + "options": [ + "Use composition so intermediate components render children without owning unrelated props", + "Use context for values that are broadly needed in a subtree", + "Duplicate the same state independently in every component that needs it", + "Lift state to the nearest common owner when multiple siblings truly share it", + "Replace props with direct DOM queries to read values from inputs" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Composition, context, and lifting state are standard ways to reduce prop drilling while preserving React data flow. Duplicating state or reading through the DOM undermines consistency and predictability.", + "tags": [ + "components", + "context", + "state-sharing", + "props" + ], + "references": [ + "Components", + "State & Props", + "Hooks" + ] + }, + { + "id": "react-q08", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A component mutates an array stored in state with push and then calls the setter with the same array reference.", + "prompt": "True or false: mutating state in place can prevent React from detecting the intended change reliably.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "React state updates should be treated immutably. 
Reusing the same reference can produce stale UI and makes update logic harder to reason about.", + "tags": [ + "state", + "immutability", + "updates" + ], + "references": [ + "State & Props" + ] + }, + { + "id": "react-q09", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A form field should show the latest server default when the selected record changes, but once the user starts typing, local edits must remain isolated from parent rerenders.", + "prompt": "Which design is the best fit?", + "options": [ + "Initialize local state from the selected record when identity changes, then let the field own subsequent edits", + "Mirror the prop into state on every render unconditionally", + "Write directly to the DOM input value and skip React state entirely", + "Store every keystroke only in a module-level variable" + ], + "correctAnswer": 0, + "explanation": "The component needs controlled ownership after initialization, tied to record identity changes rather than unconditional prop mirroring. Constantly syncing prop to state would overwrite user edits.", + "tags": [ + "forms", + "state", + "props", + "controlled-components" + ], + "references": [ + "State & Props", + "Components" + ] + }, + { + "id": "react-q10", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A teammate writes JSX with a component invocation inside braces like regular JavaScript function output and gets confused about element creation.", + "prompt": "What does JSX primarily provide in React code?", + "options": [ + "A declarative syntax for describing element trees that React can render", + "A required templating language executed by the browser at runtime only", + "A DOM querying API for selecting existing nodes", + "A replacement for JavaScript expressions inside components" + ], + "correctAnswer": 0, + "explanation": "JSX is syntax for expressing UI trees declaratively. 
It compiles to element creation calls and still allows normal JavaScript expressions within braces.", + "tags": [ + "jsx", + "syntax", + "elements" + ], + "references": [ + "Introduction", + "JSX" + ] + }, + { + "id": "react-q11", + "type": "matching", + "difficulty": "advanced", + "scenario": "A review session is separating hook responsibilities to fix a messy component.", + "prompt": "Match each hook usage to the most appropriate purpose.", + "premises": [ + "Persist a mutable value across renders without causing rerenders", + "Run logic after React commits updates to the screen", + "Store data that should trigger a rerender when changed", + "Reuse stateful behavior across components" + ], + "responses": [ + "useRef", + "useEffect", + "useState", + "Custom hook" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "useRef holds mutable values without triggering rerenders, useEffect runs post-commit side effects, useState drives reactive rerenders, and custom hooks package reusable stateful logic.", + "tags": [ + "hooks", + "useRef", + "useEffect", + "useState" + ], + "references": [ + "Hooks" + ] + }, + { + "id": "react-q12", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A modal registers window event listeners and starts a polling timer when opened. 
After opening and closing it repeatedly, memory usage grows and duplicate handlers fire.", + "prompt": "Which fixes directly address the likely React-side problem?", + "options": [ + "Return a cleanup function from the effect to remove listeners and stop timers", + "Ensure the effect dependencies reflect when the side effect should be recreated", + "Replace the modal with a class component because function components cannot clean up", + "Store listeners in state so React can render them", + "Avoid creating side effects during render" + ], + "correctAnswer": [ + 0, + 1, + 4 + ], + "explanation": "Effects should clean up subscriptions, timers, and listeners, and dependency lists should reflect when they need to rerun. Side effects do not belong in render. Function components can clean up perfectly well through effects.", + "tags": [ + "hooks", + "effects", + "cleanup", + "memory-leaks" + ], + "references": [ + "Hooks", + "DOM Fundamentals" + ] + }, + { + "id": "react-q13", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "An accessibility audit flags a custom button implemented with a div and click handler. Keyboard activation and semantics are inconsistent across browsers.", + "prompt": "What is the best first fix in React?", + "options": [ + "Replace the div with a semantic button element and preserve the React handler", + "Keep the div and add more nested spans", + "Add a CSS cursor pointer rule to the div", + "Move the click handler to document.addEventListener" + ], + "correctAnswer": 0, + "explanation": "React encourages semantic DOM usage. 
A real button gives keyboard behavior, focus handling, and accessibility semantics by default while still working with React events.", + "tags": [ + "dom", + "accessibility", + "events", + "semantics" + ], + "references": [ + "DOM Fundamentals", + "Components" + ] + }, + { + "id": "react-q14", + "type": "ordering", + "difficulty": "expert", + "scenario": "A component subscribes to a chat channel in an effect and also returns a cleanup function. The roomId prop changes from one room to another.", + "prompt": "Order the effect lifecycle around that dependency change.", + "items": [ + "React commits the render for the new roomId", + "The previous cleanup unsubscribes from the old room", + "The new effect subscribes to the new room", + "A render occurs with the updated roomId" + ], + "correctAnswer": [ + "A render occurs with the updated roomId", + "React commits the render for the new roomId", + "The previous cleanup unsubscribes from the old room", + "The new effect subscribes to the new room" + ], + "explanation": "With a dependency change, React renders the new output, commits it, then runs cleanup for the old effect before running the new effect setup.", + "tags": [ + "hooks", + "effects", + "lifecycle", + "subscriptions" + ], + "references": [ + "Hooks", + "Reconciliation" + ] + }, + { + "id": "react-q15", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A teammate asks why React can update a large table quickly even though the component tree is rerendered conceptually after a filter change.", + "prompt": "Which statement best describes the role of the virtual DOM here?", + "options": [ + "React computes a new UI description in memory and then applies only the necessary DOM changes", + "React bypasses the real DOM entirely after the first render", + "The browser provides a built-in virtual DOM that React delegates to", + "Virtual DOM means components never rerender, only DOM nodes do" + ], + "correctAnswer": 0, + "explanation": "React renders a 
new element tree in memory, compares it to the previous one, and commits only the required changes to the real DOM.", + "tags": [ + "virtual-dom", + "performance", + "rendering" + ], + "references": [ + "Virtual DOM", + "Reconciliation", + "DOM Fundamentals" + ] + }, + { + "id": "react-q16", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A developer assumes that calling a state setter always updates the screen immediately before the next line in the same event handler runs.", + "prompt": "True or false: React may batch and defer visible state updates, so reading the old state value synchronously in the same handler can still happen.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "React schedules state updates and may batch them. Code later in the same synchronous handler can still observe the pre-update value unless using an updater function or derived next value.", + "tags": [ + "state", + "batching", + "events" + ], + "references": [ + "State & Props", + "Hooks" + ] + }, + { + "id": "react-q17", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A team is reviewing why a memoized child still rerenders unexpectedly. The parent passes several props, some primitive and some recreated every render.", + "prompt": "Which props are most likely to break shallow memoization when recreated on each render?", + "options": [ + "A newly constructed object literal", + "A stable string primitive with the same value", + "A newly created arrow function", + "A newly created array literal", + "A number primitive with the same value" + ], + "correctAnswer": [ + 0, + 2, + 3 + ], + "explanation": "Shallow comparison treats objects, arrays, and functions by reference identity. 
Recreating them each render makes them appear changed even if their contents or behavior are effectively the same.", + "tags": [ + "memoization", + "props", + "components", + "performance" + ], + "references": [ + "Components", + "State & Props", + "Reconciliation" + ] + }, + { + "id": "react-q18", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A profile component sometimes renders before async user data arrives. The current code throws because it reads nested properties unconditionally.", + "prompt": "What is the most React-appropriate way to handle this rendering path?", + "options": [ + "Render conditionally based on available data and show a loading or fallback UI", + "Block the entire component tree with a while loop until data exists", + "Catch the error from property access in every JSX expression", + "Store a fake user object in a global variable and overwrite it later" + ], + "correctAnswer": 0, + "explanation": "React components should render appropriate fallback states when required data is unavailable. Conditional rendering keeps the UI predictable and avoids runtime errors.", + "tags": [ + "components", + "conditional-rendering", + "async-ui" + ], + "references": [ + "Components", + "JSX" + ] + }, + { + "id": "react-q19", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A rich text editor component must preserve its internal selection state across parent rerenders, but when the documentId changes it should reset completely.", + "prompt": "What is the cleanest React mechanism to express this requirement?", + "options": [ + "Give the editor a key based on documentId so identity resets only when that key changes", + "Force a full page reload whenever documentId changes", + "Copy every internal field into the parent on each keystroke and recreate the editor", + "Use array index as a key because it changes more often" + ], + "correctAnswer": 0, + "explanation": "Keys control component identity. 
A stable key preserves local state across ordinary rerenders, while changing the key tells React to remount and reset the editor for a different document.", + "tags": [ + "keys", + "identity", + "state-preservation", + "reconciliation" + ], + "references": [ + "Components", + "Reconciliation" + ] + }, + { + "id": "react-q20", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A code reviewer says hooks can be called from inside a loop as long as the loop runs the same number of times most of the time.", + "prompt": "True or false: hooks must be called in a consistent order on every render, not conditionally or inside loops with variable execution paths.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "React relies on consistent hook call ordering to associate state with the correct hook positions. Conditional or variable-order calls break that model.", + "tags": [ + "hooks", + "rules-of-hooks", + "rendering" + ], + "references": [ + "Hooks" + ] + }, + { + "id": "react-q21", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A teammate wants a child component to modify parent state and proposes importing the parent's setter directly from another file where it was declared.", + "prompt": "Which pattern best follows React's one-way data flow?", + "options": [ + "Pass a callback from the parent to the child so the child can request the change", + "Let the child mutate the parent's local variables directly", + "Read and write parent DOM nodes instead of passing data", + "Duplicate the parent state in the child and hope they stay aligned" + ], + "correctAnswer": 0, + "explanation": "React data generally flows down through props, while children communicate requested changes upward through callbacks passed from parents.", + "tags": [ + "state", + "props", + "callbacks", + "data-flow" + ], + "references": [ + "State & Props", + "Components" + ] + }, + { + "id": "react-q22", + "type": "multi-select", + 
"difficulty": "expert", + "scenario": "A review of a custom hook finds several responsibilities mixed together: data fetching, event listeners, and imperative DOM focus handling.", + "prompt": "Which statements reflect sound hook design principles in this situation?", + "options": [ + "Custom hooks can extract and reuse stateful logic without sharing state instances between components", + "Effects should be grouped only by lifecycle timing, never by concern", + "Separating unrelated side effects into different effects can improve clarity and correctness", + "A custom hook may internally use multiple built-in hooks", + "A custom hook automatically becomes global state for every caller" + ], + "correctAnswer": [ + 0, + 2, + 3 + ], + "explanation": "Custom hooks package reusable logic, not shared component instances. Multiple focused effects are often clearer than one large effect, and custom hooks can compose built-in hooks internally.", + "tags": [ + "hooks", + "custom-hooks", + "effects", + "architecture" + ], + "references": [ + "Hooks", + "Components" + ] + }, + { + "id": "react-q23", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A long feed rerenders after a filter change. 
One item switches from one component type to another at the same position in the tree.", + "prompt": "How will React typically treat that position during reconciliation?", + "options": [ + "It will unmount the previous component type and mount the new one because the element type changed", + "It will preserve the old component instance because the position is the same", + "It will only update text nodes and ignore the component type change", + "It will throw because mixed component types are not allowed in lists" + ], + "correctAnswer": 0, + "explanation": "When the element type changes at a given position, React generally treats it as a different subtree, so the old one is removed and a new one is mounted.", + "tags": [ + "reconciliation", + "component-identity", + "rendering" + ], + "references": [ + "Reconciliation", + "Components" + ] + }, + { + "id": "react-q24", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A component receives count as a prop. Inside an event handler, a teammate increments count directly with count++ and expects the UI to refresh.", + "prompt": "Why is this incorrect in React?", + "options": [ + "Props are read-only inputs and mutating them does not tell React to rerender", + "JavaScript does not allow incrementing numbers in functions", + "Props can only be changed in useEffect", + "React forbids arithmetic inside event handlers" + ], + "correctAnswer": 0, + "explanation": "Props are immutable from the receiving component's perspective. 
Updating UI requires state changes in the owning component, not direct prop mutation.", + "tags": [ + "props", + "immutability", + "data-flow" + ], + "references": [ + "State & Props", + "Components" + ] + }, + { + "id": "react-q25", + "type": "matching", + "difficulty": "expert", + "scenario": "You are preparing a training session on how React concepts connect to practical UI behavior.", + "prompt": "Match each premise to the best corresponding concept.", + "premises": [ + "Choosing stable identifiers for list items", + "Embedding JavaScript expressions inside markup-like syntax", + "Passing data downward from parent to child", + "Reading the latest input element node imperatively" + ], + "responses": [ + "Keys", + "JSX", + "Props", + "Ref" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Stable identifiers map to keys, embedded expressions are part of JSX, downward data flow uses props, and direct element access is typically done with refs.", + "tags": [ + "jsx", + "keys", + "props", + "refs" + ], + "references": [ + "JSX", + "Components", + "State & Props", + "Reconciliation" + ] + }, + { + "id": "react-q26", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A parent component conditionally renders a child panel. Users report that the panel's local form state disappears whenever they toggle visibility off and back on.", + "prompt": "What is the most accurate reason?", + "options": [ + "Unmounting removes the child component instance, so its local state is lost when it mounts again", + "React clears all application state when any branch is hidden", + "Local state only works in root components", + "Conditional rendering converts controlled inputs into uncontrolled ones automatically" + ], + "correctAnswer": 0, + "explanation": "When a component is removed from the tree, its instance and local state are discarded. 
Mounting it again creates a fresh instance.", + "tags": [ + "components", + "state-preservation", + "conditional-rendering" + ], + "references": [ + "Components", + "Reconciliation", + "State & Props" + ] + }, + { + "id": "react-q27", + "type": "true-false", + "difficulty": "advanced", + "scenario": "An engineer claims that because React uses a virtual DOM, direct DOM manipulation through external scripts is always safe and React will automatically reconcile with those changes.", + "prompt": "True or false: arbitrary external DOM mutations can conflict with React's assumptions about the rendered tree.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "React expects the DOM to reflect what it previously committed. External mutations can create mismatches and unpredictable behavior unless carefully isolated.", + "tags": [ + "dom", + "virtual-dom", + "integration" + ], + "references": [ + "DOM Fundamentals", + "Virtual DOM" + ] + }, + { + "id": "react-q28", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A performance regression appears after moving expensive filtering logic into a child component that rerenders on every keystroke from an unrelated parent input.", + "prompt": "What is the best first diagnosis before adding optimizations everywhere?", + "options": [ + "Check whether the child actually depends on the changing parent state and whether its props are stable", + "Convert every component in the app to useRef", + "Replace React state with manual DOM mutation", + "Assume the virtual DOM is slow and remove JSX" + ], + "correctAnswer": 0, + "explanation": "Performance work should start by identifying why a component rerenders and whether the rerender is necessary. 
Dependency analysis and prop stability are more reliable than blanket optimization.", + "tags": [ + "performance", + "components", + "rendering", + "props" + ], + "references": [ + "Components", + "Virtual DOM", + "Reconciliation" + ] + }, + { + "id": "react-q29", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A checkout flow calculates totals from cart items, shipping rules, and discount codes. The team is debating what should live in state versus what should be recomputed during render.", + "prompt": "Which values are usually better treated as derived values rather than independently stored state, assuming the source inputs are already available?", + "options": [ + "The formatted grand total computed from cart lines and discount rules", + "The raw user-entered coupon code text field value", + "A filtered subset of visible items based on current search criteria", + "The source cart items array fetched from the server", + "A boolean that can be computed directly from whether the cart length is zero" + ], + "correctAnswer": [ + 0, + 2, + 4 + ], + "explanation": "Computed totals, filtered subsets, and booleans derivable from existing inputs generally need not be separate state. 
Source inputs such as the cart data and raw form input are primary state or props.", + "tags": [ + "state", + "derived-state", + "rendering", + "data-modeling" + ], + "references": [ + "State & Props", + "Hooks" + ] + }, + { + "id": "react-q30", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A new developer asks why a component must return one parent element or an explicit fragment around siblings in JSX.", + "prompt": "What is the best explanation?", + "options": [ + "A component render must produce a single React element tree root, which can be a fragment grouping siblings", + "Browsers reject adjacent elements in HTML generated by JavaScript", + "React components can only render div elements at the top level", + "Fragments are required only for styling purposes" + ], + "correctAnswer": 0, + "explanation": "A component returns one tree root. Fragments allow multiple siblings to be grouped without adding unnecessary DOM nodes.", + "tags": [ + "jsx", + "components", + "fragments" + ], + "references": [ + "JSX", + "Components", + "Introduction" + ] + }, + { + "id": "react-q31", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A senior engineer reviews a bug where a stale closure in an effect logs an outdated value after several renders. The effect intentionally omits a changing dependency.", + "prompt": "What is the most accurate assessment?", + "options": [ + "Omitting a changing dependency can cause the effect to capture outdated values from an earlier render", + "Effects always read live state values even when dependencies are omitted", + "Closures do not apply to React components", + "The bug is caused only by JSX syntax, not by effect dependencies" + ], + "correctAnswer": 0, + "explanation": "Effects capture values from the render in which they were created. 
If a changing value is omitted from dependencies, the effect may keep using a stale closure from an older render.", + "tags": [ + "hooks", + "effects", + "closures", + "debugging" + ], + "references": [ + "Hooks", + "Quiz" + ] + } + ] +} \ No newline at end of file diff --git a/quiz-banks/rxjs.quiz.json b/quiz-banks/rxjs.quiz.json new file mode 100644 index 0000000..371478a --- /dev/null +++ b/quiz-banks/rxjs.quiz.json @@ -0,0 +1,854 @@ +{ + "moduleId": "rxjs", + "moduleTitle": "RxJS Reactive Programming", + "description": "Scenario-based assessment of advanced RxJS stream design, operator tradeoffs, multicasting, marble testing, and failure recovery.", + "version": 1, + "timeLimitMinutes": 10, + "questionsPerAttempt": 5, + "questions": [ + { + "id": "rxjs-q01", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "An ecommerce search box sends a request on every keystroke. Users type quickly, and stale responses are occasionally rendering after newer ones.", + "prompt": "Which operator should replace the current flattening strategy so only the latest request is allowed to update the UI?", + "options": [ + "mergeMap", + "concatMap", + "switchMap", + "exhaustMap" + ], + "correctAnswer": 2, + "explanation": "switchMap cancels the previous inner subscription whenever a new search term arrives, which prevents stale responses from racing ahead of newer ones.", + "tags": [ + "switchMap", + "autocomplete", + "cancellation" + ], + "references": [ + "Operators", + "Advanced Operators", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q02", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A telemetry pipeline emits upload batches continuously. 
Dropping a batch is unacceptable, the backend allows at most three concurrent writes, and one failed upload must not kill the whole stream.", + "prompt": "Which pipeline choices fit these constraints?", + "options": [ + "Use mergeMap(batch => save$(batch).pipe(catchError(() => EMPTY)), 3)", + "Use switchMap so each new batch cancels the previous write", + "Use concatMap if preserving strict input order matters more than throughput", + "Put catchError outside the mergeMap so the first failing save completes the entire batch stream", + "Use exhaustMap so new batches are ignored while one write is active" + ], + "correctAnswer": [ + 0, + 2 + ], + "explanation": "mergeMap with a concurrency limit of 3 satisfies the throughput constraint, and handling errors inside each save prevents the outer stream from terminating. concatMap is also valid when ordered delivery is more important than parallelism.", + "tags": [ + "mergeMap", + "concatMap", + "concurrency", + "batch-processing" + ], + "references": [ + "Advanced Operators", + "Error Handling", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q03", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A dashboard service caches a polling stream with shareReplay({ bufferSize: 1, refCount: false }). 
Widgets subscribe and unsubscribe as routes change.", + "prompt": "True or false: with that configuration, the upstream polling source can stay subscribed even after the last widget unsubscribes.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "With refCount set to false, the shared subscription is not automatically torn down when subscriber count reaches zero, so the source can remain active.", + "tags": [ + "shareReplay", + "multicasting", + "memory-leaks" + ], + "references": [ + "Subjects", + "Advanced Operators", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q04", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A checkout flow combines submit clicks with the latest fraud score and form validity. The fraud score updates independently, but risk evaluation should only run when the user clicks submit.", + "prompt": "Which combination operator best models this trigger pattern?", + "options": [ + "combineLatest", + "withLatestFrom", + "zip", + "race" + ], + "correctAnswer": 1, + "explanation": "withLatestFrom uses one source as the trigger and samples the latest values from the other streams only when that trigger emits.", + "tags": [ + "withLatestFrom", + "combineLatest", + "forms" + ], + "references": [ + "Operators", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q05", + "type": "ordering", + "difficulty": "expert", + "scenario": "A trading app consumes a websocket feed. 
The team wants to reconnect with backoff on disconnects, ignore malformed messages, derive the latest order book state, and share that state across multiple widgets.", + "prompt": "Put these steps in the most appropriate conceptual order from source creation to final shared stream.", + "items": [ + "Create the websocket source", + "Attach retryWhen so disconnects resubscribe with backoff", + "Filter out malformed payloads from each connection", + "Use scan to build the latest order book state", + "Share the derived state with shareReplay" + ], + "correctAnswer": [ + "Create the websocket source", + "Attach retryWhen so disconnects resubscribe with backoff", + "Filter out malformed payloads from each connection", + "Use scan to build the latest order book state", + "Share the derived state with shareReplay" + ], + "explanation": "Start from the source, define the reconnection policy, validate incoming messages, accumulate state with scan, and only then multicast the derived stream for downstream consumers.", + "tags": [ + "ordering", + "webSocket", + "retryWhen", + "shareReplay" + ], + "references": [ + "Advanced Operators", + "Error Handling", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q06", + "type": "matching", + "difficulty": "advanced", + "scenario": "During an architecture review, your team needs to pick the correct subject type for four different collaboration patterns.", + "prompt": "Match each RxJS subject type to the most suitable behavior.", + "premises": [ + "Subject", + "BehaviorSubject", + "ReplaySubject", + "AsyncSubject" + ], + "responses": [ + "Late subscribers need the current state immediately and an initial seed exists", + "Only the final emitted value should be delivered once the source completes", + "Late subscribers must receive a configurable slice of recent history", + "Values should be multicasted only to current subscribers with no replay" + ], + "correctAnswer": [ + 3, + 0, + 2, + 1 + ], + "explanation": "Subject has no 
replay, BehaviorSubject stores the current value, ReplaySubject stores a history window, and AsyncSubject emits the last value only upon completion.", + "tags": [ + "subjects", + "BehaviorSubject", + "ReplaySubject", + "AsyncSubject" + ], + "references": [ + "Subjects", + "Core Components" + ] + }, + { + "id": "rxjs-q07", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A mobile app login button is being tapped repeatedly on slow networks, producing duplicate authentication attempts and backend session conflicts.", + "prompt": "Which flattening operator is the safest default if later taps should be ignored until the current login finishes?", + "options": [ + "mergeMap", + "switchMap", + "concatMap", + "exhaustMap" + ], + "correctAnswer": 3, + "explanation": "exhaustMap ignores new outer emissions while an inner observable is active, which makes it well suited for duplicate-submit prevention.", + "tags": [ + "exhaustMap", + "authentication", + "higher-order-mapping" + ], + "references": [ + "Operators", + "Advanced Operators", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q08", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A search box uses switchMap(term => http(term).pipe(...)). 
The team wants a failed request to recover gracefully while still allowing future search terms to trigger new requests.", + "prompt": "Which statements are correct about catchError placement in this design?", + "options": [ + "Placing catchError inside the switchMap lets a failed request recover without terminating the outer search term stream", + "Placing catchError after the switchMap will catch inner errors, but if it returns a finite fallback and completes, the outer search term stream can stop", + "If catchError inside the switchMap returns EMPTY, only that inner request ends and later search terms can still trigger new requests", + "Placing catchError inside the switchMap always rethrows errors to the outer stream", + "Using catchError outside the switchMap is the only way to log the failing term" + ], + "correctAnswer": [ + 0, + 1, + 2 + ], + "explanation": "Inner catchError localizes failure to the request associated with one term. An outer catchError can recover too, but depending on what it returns, it can also complete the entire chain and stop future searches.", + "tags": [ + "catchError", + "switchMap", + "error-boundaries" + ], + "references": [ + "Error Handling", + "Advanced Operators", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q09", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A collaborative editor autosaves drafts. 
Each save must reach the server in the same order that edits were accepted, and a later save must wait for the prior one to finish.", + "prompt": "Which operator most directly enforces that policy?", + "options": [ + "mergeMap", + "concatMap", + "switchMap", + "throttleTime" + ], + "correctAnswer": 1, + "explanation": "concatMap queues inner observables and subscribes to them one at a time, preserving ordering across saves.", + "tags": [ + "concatMap", + "autosave", + "ordering" + ], + "references": [ + "Operators", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q10", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A loading spinner is hidden with finalize(() => hideSpinner()) on an HTTP request that may complete, error, or be cancelled by takeUntil when a view unmounts.", + "prompt": "True or false: finalize runs for completion, error, and explicit unsubscription.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "finalize registers teardown logic and runs whenever the subscription ends, including normal completion, errors, and manual unsubscription.", + "tags": [ + "finalize", + "teardown", + "unsubscribe" + ], + "references": [ + "Error Handling", + "Core Components" + ] + }, + { + "id": "rxjs-q11", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A filter state stream emits new object literals on every render, even when the semantic filter has not changed. 
Duplicate downstream analytics events are being recorded.", + "prompt": "What is the most precise RxJS-side fix?", + "options": [ + "Add debounceTime(0) so equal objects collapse", + "Use distinctUntilChanged() with a custom comparator for the fields that matter", + "Replace the stream with a BehaviorSubject so duplicate objects are ignored", + "Use share() before subscribing" + ], + "correctAnswer": 1, + "explanation": "distinctUntilChanged uses reference equality by default, so object-shaped state usually needs a custom comparator keyed to the meaningful fields.", + "tags": [ + "distinctUntilChanged", + "comparators", + "state-streams" + ], + "references": [ + "Operators", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q12", + "type": "multi-select", + "difficulty": "expert", + "scenario": "Application configuration is loaded over HTTP and shared by many lazily mounted widgets. The team wants one shared request per active consumer set, replay of the latest config to late subscribers, and no permanently pinned subscription after all widgets unmount.", + "prompt": "Which choices help satisfy this design?", + "options": [ + "Use shareReplay({ bufferSize: 1, refCount: true }) on the HTTP stream", + "Use shareReplay({ bufferSize: 1, refCount: false }) if automatic teardown when widgets unmount is required", + "Place shareReplay after any mapping that produces the final configuration shape consumers need", + "Subscribe in a global singleton and never unsubscribe so the cache is always warm", + "If the source can error, decide whether to recover before the replay boundary so the cached terminal state matches intent" + ], + "correctAnswer": [ + 0, + 2, + 4 + ], + "explanation": "refCount true allows teardown when the last consumer leaves, placing shareReplay after shaping avoids repeating transform work, and error handling should be chosen deliberately before replay so the shared terminal behavior is correct.", + "tags": [ + "shareReplay", + "caching", + "http", 
+ "multicasting" + ], + "references": [ + "Subjects", + "Advanced Operators", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q13", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A page must load user profile, permissions, and feature flags. Rendering should occur once all three one-shot requests complete successfully, using their final values only.", + "prompt": "Which combinator matches that behavior most directly?", + "options": [ + "combineLatest", + "forkJoin", + "merge", + "withLatestFrom" + ], + "correctAnswer": 1, + "explanation": "forkJoin waits for all input observables to complete and then emits one combined result using their last emitted values.", + "tags": [ + "forkJoin", + "combination", + "http" + ], + "references": [ + "Operators", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q14", + "type": "matching", + "difficulty": "expert", + "scenario": "A teammate is reviewing marble tests and wants to confirm the meaning of several shorthand fragments before diagnosing a timing bug.", + "prompt": "Match each marble fragment to its meaning.", + "premises": [ + "--a-b-|", + "--#", + "^---!", + "(abc)|" + ], + "responses": [ + "Subscription starts now and is explicitly unsubscribed later", + "Several values are emitted in the same frame and then the stream completes", + "The stream errors at that point in time", + "Two next notifications occur over time and then the stream completes" + ], + "correctAnswer": [ + 3, + 2, + 0, + 1 + ], + "explanation": "Standard marble syntax uses letters for next notifications, # for error, ! 
for unsubscription, and parentheses for synchronous grouped emissions in the same frame.", + "tags": [ + "marbles", + "tests", + "scheduler" + ], + "references": [ + "Marble Diagrams", + "Visualization Tool" + ] + }, + { + "id": "rxjs-q15", + "type": "ordering", + "difficulty": "advanced", + "scenario": "A profile form should wait for typing to pause, ignore invalid states, skip resubmitting equivalent payloads, send only the newest valid save, and surface a fallback result when a save fails.", + "prompt": "Order these operations to build the intended pipeline.", + "items": [ + "debounceTime to wait for typing to settle", + "filter for valid form states", + "distinctUntilChanged with a payload comparator", + "switchMap to a save request for the newest payload", + "Inside that request chain, use catchError to return a fallback result" + ], + "correctAnswer": [ + "debounceTime to wait for typing to settle", + "filter for valid form states", + "distinctUntilChanged with a payload comparator", + "switchMap to a save request for the newest payload", + "Inside that request chain, use catchError to return a fallback result" + ], + "explanation": "First reduce noise, then gate by validity, then suppress semantically identical payloads. After that, switch to the newest request and catch request-level failures inside the switched save chain.", + "tags": [ + "ordering", + "forms", + "switchMap", + "catchError" + ], + "references": [ + "Operators", + "Error Handling", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q16", + "type": "single-choice", + "difficulty": "expert", + "scenario": "An operations stream carries events for many tenants. 
You need a rolling failure count per tenant without manually pre-splitting the source into separate observables.", + "prompt": "Which operator gives the most direct starting point for that design?", + "options": [ + "partition", + "pairwise", + "groupBy", + "bufferCount" + ], + "correctAnswer": 2, + "explanation": "groupBy partitions a source stream into grouped observables keyed by a selector, which is the standard starting point for per-key aggregation.", + "tags": [ + "groupBy", + "aggregation", + "multi-tenant" + ], + "references": [ + "Advanced Operators", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q17", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A shared state container is implemented with a BehaviorSubject seeded during hydration. A component subscribes after several updates have already happened.", + "prompt": "True or false: the late subscriber receives the most recent value immediately, even if that value was emitted before the subscription began.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "BehaviorSubject always holds a current value and synchronously emits that current value to new subscribers.", + "tags": [ + "BehaviorSubject", + "state-management", + "hot-observable" + ], + "references": [ + "Subjects", + "Core Components" + ] + }, + { + "id": "rxjs-q18", + "type": "multi-select", + "difficulty": "expert", + "scenario": "An API fails intermittently during traffic spikes. 
You are designing retryWhen with exponential backoff and a clear stop condition so the UI can surface a terminal error after reasonable retries.", + "prompt": "Which statements are sound for this design?", + "options": [ + "retryWhen can derive delay timing from the error stream, for example by pairing errors with attempt counts", + "A backoff strategy should usually include a maximum attempt count or another terminal condition", + "retryWhen automatically preserves request idempotency, so repeated POST side effects are always safe", + "When retries are exhausted, rethrowing or returning throwError from the notifier path lets downstream error handling run", + "Putting shareReplay before retryWhen guarantees each subscriber gets an independent retry budget" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "retryWhen is flexible enough to implement backoff from the error notifier stream, but retry safety still depends on the side effect being retried. A stop condition is essential, and exhausted retries should re-signal failure so downstream logic can react.", + "tags": [ + "retryWhen", + "backoff", + "resilience" + ], + "references": [ + "Advanced Operators", + "Error Handling", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q19", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A chart should update at most every 200 ms while a user drags a scrollbar. 
The team wants periodic updates during the drag instead of waiting until interaction fully stops.", + "prompt": "Which operator is the better fit than debounceTime?", + "options": [ + "auditTime", + "last", + "reduce", + "takeLast" + ], + "correctAnswer": 0, + "explanation": "auditTime emits the most recent value at the chosen interval while activity continues, whereas debounceTime waits for silence before emitting.", + "tags": [ + "auditTime", + "ui-performance", + "rate-limiting" + ], + "references": [ + "Operators", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q20", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A portfolio page refreshes automatically every 30 seconds, but the user can also trigger manual refreshes. Whenever either trigger fires, any in-flight request should be abandoned and the newest refresh should win.", + "prompt": "Which flattening strategy should drive the request stream?", + "options": [ + "concatMap", + "switchMap", + "exhaustMap", + "expand" + ], + "correctAnswer": 1, + "explanation": "switchMap cancels the currently active refresh request whenever a newer trigger arrives, which is exactly the desired newest-wins behavior.", + "tags": [ + "switchMap", + "polling", + "manual-refresh" + ], + "references": [ + "Advanced Operators", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q21", + "type": "matching", + "difficulty": "advanced", + "scenario": "A code review compares higher-order mapping choices against the production bugs they can create when used in the wrong context.", + "prompt": "Match each operator misuse to its most likely symptom.", + "premises": [ + "Using mergeMap for an autocomplete request stream", + "Using switchMap for non-idempotent file uploads", + "Using exhaustMap for a rapid-fire save button", + "Using concatMap for latency-sensitive live typing previews" + ], + "responses": [ + "New clicks are ignored while one request is active", + "Older uploads can be cancelled before they 
finish", + "Responses may arrive out of order and stale data can flash", + "Updates queue behind each other and the preview lags" + ], + "correctAnswer": [ + 2, + 1, + 0, + 3 + ], + "explanation": "Each flattening operator encodes a tradeoff: mergeMap allows concurrency, switchMap cancels prior work, exhaustMap ignores new triggers during active work, and concatMap queues work sequentially.", + "tags": [ + "operator-tradeoffs", + "higher-order-mapping", + "code-review" + ], + "references": [ + "Advanced Operators", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q22", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A monitoring widget should turn failures into ordinary values so the UI can render an inline error tile through the same rendering pipeline used for successful events.", + "prompt": "Which operator is the most direct starting point for converting next, error, and complete signals into data?", + "options": [ + "materialize", + "dematerialize", + "repeatWhen", + "timeout" + ], + "correctAnswer": 0, + "explanation": "materialize converts notifications into Notification objects, allowing errors and completion to be treated as regular values. 
dematerialize is the inverse step if you later need to restore stream semantics.", + "tags": [ + "materialize", + "notifications", + "error-as-data" + ], + "references": [ + "Advanced Operators", + "Error Handling" + ] + }, + { + "id": "rxjs-q23", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A KPI widget combines two long-lived streams with combineLatest, but one source is a rarely updated feature-flag stream that has not emitted since startup.", + "prompt": "True or false: combineLatest emits nothing until every source has produced at least one value.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "combineLatest requires an initial emission from each input before it can produce a combined value.", + "tags": [ + "combineLatest", + "initial-emission", + "dashboards" + ], + "references": [ + "Operators", + "Core Components" + ] + }, + { + "id": "rxjs-q24", + "type": "multi-select", + "difficulty": "advanced", + "scenario": "A shared service exposes an internal Subject directly to many features. 
Months later, the team discovers memory leaks and accidental next calls from code that should have been read-only.", + "prompt": "Which changes improve the design?", + "options": [ + "Expose subject.asObservable() to consumers instead of the writable Subject", + "Rely on garbage collection alone; completion and unsubscription strategy do not matter for Subjects", + "Use takeUntil or an equivalent teardown pattern in subscribers with component lifecycles", + "Keep the Subject public so any feature can emit events directly", + "Complete or replace long-lived bridge Subjects deliberately when their owning scope is disposed" + ], + "correctAnswer": [ + 0, + 2, + 4 + ], + "explanation": "Encapsulation prevents arbitrary writes, lifecycle-based teardown prevents leaked subscriptions, and deliberate disposal of bridge subjects avoids holding onto observers longer than intended.", + "tags": [ + "Subject", + "encapsulation", + "memory-leaks" + ], + "references": [ + "Subjects", + "Error Handling", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q25", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A startup routine wraps a server-sent events stream with firstValueFrom to await the first business event, but that source may stay open indefinitely and might not emit the needed event quickly.", + "prompt": "What is the main risk if the stream never emits a qualifying value?", + "options": [ + "firstValueFrom will automatically resolve undefined after one scheduler frame", + "The promise can hang indefinitely unless you bound the stream with operators like timeout, take, or takeUntil", + "RxJS will convert the lack of emissions into a complete notification immediately", + "The stream becomes cold and replays all past events" + ], + "correctAnswer": 1, + "explanation": "firstValueFrom resolves on the first emission or rejects on error, but without a bound on a long-lived or silent source, it can await forever.", + "tags": [ + "firstValueFrom", + 
"promises", + "timeouts" + ], + "references": [ + "Observables", + "Error Handling", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q26", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A hot UI state stream drives expensive DOM paint-related updates. You want scheduled work aligned to the browser's next repaint rather than a generic macrotask.", + "prompt": "Which scheduler is the most appropriate?", + "options": [ + "queueScheduler", + "asapScheduler", + "asyncScheduler", + "animationFrameScheduler" + ], + "correctAnswer": 3, + "explanation": "animationFrameScheduler schedules work to run in coordination with the browser's animation frame, which is ideal for render-related updates.", + "tags": [ + "schedulers", + "rendering", + "performance" + ], + "references": [ + "Core Components", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q27", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A sensor stream emits temperature readings every second. 
You need to derive whether the trend is rising or falling by comparing each reading to the immediately previous one.", + "prompt": "Which operator gives the cleanest starting shape for that comparison?", + "options": [ + "scan", + "pairwise", + "groupBy", + "windowTime" + ], + "correctAnswer": 1, + "explanation": "pairwise emits tuples containing the previous and current values, which is exactly the shape needed for step-to-step trend detection.", + "tags": [ + "pairwise", + "time-series", + "analysis" + ], + "references": [ + "Advanced Operators", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q28", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A dashboard widget should fail a stale request if no value arrives within 2 seconds, then recover with a cached snapshot instead of spinning forever.", + "prompt": "Which operator most directly enforces the latency boundary before recovery logic runs?", + "options": [ + "timeout", + "delayWhen", + "repeat", + "skipUntil" + ], + "correctAnswer": 0, + "explanation": "timeout converts excessive silence into an error, which can then be handled with catchError or another recovery strategy.", + "tags": [ + "timeout", + "fallback", + "latency" + ], + "references": [ + "Advanced Operators", + "Error Handling", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q29", + "type": "matching", + "difficulty": "expert", + "scenario": "You are reviewing four small error-handling patterns and need to verify the runtime effect each one produces before approving a refactor.", + "prompt": "Match each pattern to its runtime effect.", + "premises": [ + "retry(2)", + "catchError(() => of([]))", + "finalize(() => log('done'))", + "throwError(() => err)" + ], + "responses": [ + "Creates a stream that immediately errors with the supplied error when subscribed", + "Runs teardown side effects when the stream completes, errors, or is unsubscribed", + "Resubscribes up to the configured number of times before allowing
failure to escape", + "Replaces the failure with a fallback value stream and completes successfully" + ], + "correctAnswer": [ + 2, + 3, + 1, + 0 + ], + "explanation": "retry resubscribes, catchError replaces failure with a recovery stream, finalize performs teardown-side effects, and throwError creates an observable that errors upon subscription.", + "tags": [ + "retry", + "catchError", + "finalize", + "throwError" + ], + "references": [ + "Error Handling", + "Core Components" + ] + }, + { + "id": "rxjs-q30", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A real-time operations dashboard combines CPU, memory, and deployment streams. Some inputs are quiet at startup, widgets mount late, and the combined view should avoid blank initial panels where practical.", + "prompt": "Which approaches are useful for this setup?", + "options": [ + "Seed quiet sources with startWith so combineLatest can produce an initial aggregate sooner", + "Use shareReplay(1) on the combined stream if late-subscribing widgets should see the latest derived state immediately", + "Replace combineLatest with zip when you want ongoing updates from the latest value of each source", + "If one source should only sample others when it emits, consider withLatestFrom instead of combineLatest", + "Depend on combineLatest alone to invent startup values for sources that have not emitted yet" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "startWith can seed missing initial values, shareReplay helps late subscribers catch up immediately, and withLatestFrom is the right alternative when one stream should act as the sole trigger.", + "tags": [ + "combineLatest", + "startWith", + "shareReplay", + "withLatestFrom" + ], + "references": [ + "Operators", + "Subjects", + "Real-World Applications" + ] + }, + { + "id": "rxjs-q31", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A regression appears only when switchMap should cancel the previous inner 
subscription. The team is writing a marble test and wants proof of cancellation, not just matching output values.", + "prompt": "Which testing technique gives the strongest evidence that cancellation actually happened?", + "options": [ + "Assert only the emitted values from the outer observable", + "Use expectSubscriptions on the inner stream marbles to verify the previous subscription was unsubscribed early", + "Wrap the test in setTimeout and inspect console logs", + "Use a ReplaySubject so cancellation no longer matters" + ], + "correctAnswer": 1, + "explanation": "Output assertions can miss incorrect subscription lifecycles. expectSubscriptions lets you assert the exact subscription and unsubscription windows for inner observables.", + "tags": [ + "TestScheduler", + "expectSubscriptions", + "switchMap" + ], + "references": [ + "Marble Diagrams", + "Visualization Tool", + "Quiz" + ] + } + ] +} \ No newline at end of file diff --git a/quiz-banks/systemdesign.quiz.json b/quiz-banks/systemdesign.quiz.json new file mode 100644 index 0000000..65b48bd --- /dev/null +++ b/quiz-banks/systemdesign.quiz.json @@ -0,0 +1,855 @@ +{ + "moduleId": "systemdesign", + "moduleTitle": "System Design", + "description": "Scenario-driven assessment of advanced system design trade-offs, distributed systems constraints, scaling patterns, architectural governance, and architecture communication techniques.", + "version": 1, + "timeLimitMinutes": 10, + "questionsPerAttempt": 5, + "questions": [ + { + "id": "systemdesign-q01", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A B2B SaaS startup has one product team, fewer than 10 engineers, and needs to ship a first version in three months. 
The core workflow spans billing, user management, and reporting in one transactional path, and forecasted traffic is moderate.", + "prompt": "Which architecture is the most defensible starting point?", + "options": [ + "Monolithic architecture", + "Microservices architecture", + "Event-driven architecture with separate services", + "Serverless-only architecture" + ], + "correctAnswer": 0, + "explanation": "A monolith is usually the most practical starting point for a small team with tight timelines, shared transactions, and modest scale. It minimizes operational overhead while preserving strong internal consistency.", + "tags": [ + "monolith", + "trade-offs", + "time-to-market", + "transactions" + ], + "references": [ + "Introduction", + "Architecture Patterns" + ] + }, + { + "id": "systemdesign-q02", + "type": "multi-select", + "difficulty": "expert", + "scenario": "Six months later, the same company splits into 12 domain teams and decomposes the platform into services. Early staging tests show request chains spanning five services, and incidents take hours to trace.", + "prompt": "Which capabilities become immediately necessary to operate this design responsibly?", + "options": [ + "Service discovery and versioned service contracts", + "Distributed tracing with correlation IDs", + "A single in-process transaction spanning every service call", + "Timeouts, retries, and circuit breakers between services", + "A rule that every service must use the same programming language" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Once the system becomes distributed, service discovery, tracing, and resilience patterns become operational essentials. 
A single in-process transaction is not realistic across independent services, and mandatory language uniformity does not solve the core runtime problems.", + "tags": [ + "microservices", + "observability", + "resilience", + "operations" + ], + "references": [ + "Architecture Patterns", + "Distributed Systems", + "Design Principles" + ] + }, + { + "id": "systemdesign-q03", + "type": "true-false", + "difficulty": "expert", + "scenario": "A social feed runs across two regions and decides to remain available during an inter-region partition, even if some replicas lag behind.", + "prompt": "True or false: during the partition, some users may read stale data even though the service still responds.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "Choosing availability during a partition often means accepting stale or divergent reads until replicas converge again. That is the core trade-off behind AP-style behavior.", + "tags": [ + "cap-theorem", + "availability", + "eventual-consistency", + "partitions" + ], + "references": [ + "Distributed Systems" + ] + }, + { + "id": "systemdesign-q04", + "type": "ordering", + "difficulty": "expert", + "scenario": "You are preparing an architecture deck for audiences ranging from executives to the implementation team.", + "prompt": "Arrange the C4 views from the highest level of abstraction to the deepest level of detail.", + "items": [ + "Component diagram", + "Code diagram", + "Container diagram", + "System Context diagram" + ], + "correctAnswer": [ + "System Context diagram", + "Container diagram", + "Component diagram", + "Code diagram" + ], + "explanation": "The C4 model moves from broad context to implementation detail: System Context, then Container, then Component, then Code. 
That sequence progressively narrows the audience and technical depth.", + "tags": [ + "c4-model", + "visualization", + "communication", + "architecture-diagrams" + ], + "references": [ + "Visualization" + ] + }, + { + "id": "systemdesign-q05", + "type": "matching", + "difficulty": "expert", + "scenario": "An architecture review board is mapping production problems to the most appropriate design concept.", + "prompt": "Match each situation to the best response.", + "premises": [ + "A payment ledger cannot tolerate stale reads after a successful charge", + "A feed ranking service can accept temporary divergence across replicas", + "An order event should trigger billing, analytics, and email without tight coupling", + "A downstream dependency becomes slow and repeated failures should be cut off quickly" + ], + "responses": [ + "Strong consistency", + "Eventual consistency", + "Event-driven architecture", + "Circuit breaker" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Critical financial state favors strong consistency, less critical replicated views can tolerate eventual consistency, multi-consumer workflows fit event-driven design, and repeated dependency failures are a classic circuit-breaker use case.", + "tags": [ + "consistency-models", + "event-driven", + "resilience", + "mapping" + ], + "references": [ + "Distributed Systems", + "Architecture Patterns" + ] + }, + { + "id": "systemdesign-q06", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A global live-streaming product serves identical video segments to viewers across continents, and origin servers are saturating during major sports events.", + "prompt": "Which change most directly reduces origin load and median latency for this traffic pattern?", + "options": [ + "Deploy a CDN with edge caching close to viewers", + "Move the metadata database from SQL to NoSQL", + "Increase the monolith's heap size", + "Force every client to reconnect more often" + ], + 
"correctAnswer": 0, + "explanation": "A CDN is designed to serve repeatable content from edge locations near users, which lowers latency and shields origins from redundant requests. The other options do not directly address the dominant traffic pattern.", + "tags": [ + "cdn", + "edge-caching", + "latency", + "origin-offload" + ], + "references": [ + "Scaling Strategies", + "Case Studies" + ] + }, + { + "id": "systemdesign-q07", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A chat platform keeps session state in memory and runs on a mixed fleet where some nodes have twice the CPU and RAM of others. Connection duration is highly uneven across users.", + "prompt": "Which strategies directly address either session affinity or uneven node and connection load?", + "options": [ + "IP hash to keep a given client on the same node", + "Weighted round robin so stronger nodes receive more traffic", + "Pure round robin with no awareness of session locality", + "Least connections for workloads with uneven connection duration", + "Vertical scaling the database instead of balancing application traffic" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "IP hash helps with sticky routing, weighted round robin reflects heterogeneous capacity, and least connections is well suited to long-lived or uneven connection workloads. 
Pure round robin ignores the stated constraints, and database scaling does not solve application-tier distribution.", + "tags": [ + "load-balancing", + "session-affinity", + "weighted-round-robin", + "least-connections" + ], + "references": [ + "Scaling Strategies" + ] + }, + { + "id": "systemdesign-q08", + "type": "single-choice", + "difficulty": "expert", + "scenario": "An order pipeline must let inventory, billing, fraud detection, and email react independently to an OrderPlaced event without the checkout service waiting on each consumer.", + "prompt": "Which architecture best fits that requirement?", + "options": [ + "Event-driven architecture", + "Monolithic architecture", + "Shared-library integration", + "Strongly consistent two-phase commit between every consumer" + ], + "correctAnswer": 0, + "explanation": "Event-driven architecture decouples producers from multiple downstream consumers and supports asynchronous fan-out. The alternative options either increase coupling or force unnecessary synchronous coordination.", + "tags": [ + "event-driven", + "decoupling", + "async-processing", + "workflows" + ], + "references": [ + "Architecture Patterns", + "Scaling Strategies" + ] + }, + { + "id": "systemdesign-q09", + "type": "true-false", + "difficulty": "expert", + "scenario": "A multi-region configuration store must keep all readers on the same value during a partition, even if that means refusing some requests.", + "prompt": "True or false: once partition tolerance is required, preserving that consistency guarantee may force the system to sacrifice availability for some operations.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "Under the CAP trade-off, a partition-tolerant system that insists on strong consistency may need to reject or delay requests rather than serve potentially stale answers.", + "tags": [ + "cap-theorem", + "consistency", + "availability", + "multi-region" + ], + "references": [ + "Distributed Systems" + 
] + }, + { + "id": "systemdesign-q10", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A streaming catalog receives far more search and browse reads than writes. Product changes must commit reliably, but the read path needs denormalized, low-latency search documents optimized separately from the write model.", + "prompt": "Which pattern is the best fit?", + "options": [ + "CQRS with a separate read model and write model", + "A single global mutex around the database", + "Pure round robin load balancing", + "A monolith with only vertical scaling" + ], + "correctAnswer": 0, + "explanation": "CQRS is a good fit when read and write workloads have different scaling and modeling needs. It allows the write side to preserve correctness while the read side is optimized for fast queries and denormalized views.", + "tags": [ + "cqrs", + "read-model", + "write-model", + "read-heavy-systems" + ], + "references": [ + "Scaling Strategies", + "Case Studies", + "Architecture Patterns" + ] + }, + { + "id": "systemdesign-q11", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A video service is adding multi-level caching for title metadata. The team wants lower origin traffic without allowing stale data to persist indefinitely after updates.", + "prompt": "Which design choices help achieve that balance?", + "options": [ + "Apply TTLs so stale entries eventually expire", + "Emit invalidation events when metadata changes", + "Cache objects forever and rely on manual purges only", + "Layer caches across browser, CDN, and application tiers", + "Bypass caches on every read so caches stay accurate" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "TTL-based expiry, explicit invalidation, and layered caches are common ways to reduce origin traffic while controlling staleness. 
Permanent caches and universal bypasses each undermine the goal for opposite reasons.", + "tags": [ + "caching", + "ttl", + "invalidation", + "multi-level-cache" + ], + "references": [ + "Scaling Strategies", + "Case Studies" + ] + }, + { + "id": "systemdesign-q12", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A marketing platform processes unpredictable bursts of webhook events a few times per day. Each task is stateless, runs for under a second, and the company wants minimal operational overhead.", + "prompt": "Which architecture is the most defensible default?", + "options": [ + "Serverless functions triggered by events", + "A stateful monolith with fixed capacity", + "A synchronous microservice mesh for every webhook", + "A primary-secondary database cluster" + ], + "correctAnswer": 0, + "explanation": "Short-lived, bursty, stateless tasks map well to serverless because capacity scales with demand and operational management is minimized. The other options add complexity or solve a different problem.", + "tags": [ + "serverless", + "spiky-traffic", + "event-processing", + "operational-overhead" + ], + "references": [ + "Architecture Patterns", + "Scaling Strategies" + ] + }, + { + "id": "systemdesign-q13", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A product manager asks for a diagram showing your platform, its users, the payment gateway, and the identity provider, but not the internals of each application.", + "prompt": "Which C4 level should you start with?", + "options": [ + "System Context diagram", + "Container diagram", + "Component diagram", + "Code diagram" + ], + "correctAnswer": 0, + "explanation": "A System Context diagram is the right starting point when the audience needs to understand external actors, neighboring systems, and boundaries without implementation detail.", + "tags": [ + "c4-model", + "system-context", + "stakeholder-communication", + "visualization" + ], + "references": [ + 
"Visualization", + "Introduction" + ] + }, + { + "id": "systemdesign-q14", + "type": "ordering", + "difficulty": "expert", + "scenario": "Your architecture review board wants a disciplined decision process instead of ad hoc debates.", + "prompt": "Order the steps from earliest to latest in a sane governance workflow.", + "items": [ + "Record the decision and trade-offs in an ADR", + "Compare candidate patterns against the required quality attributes", + "Revisit the decision when production metrics or constraints change", + "Capture the most important constraints and quality attributes", + "Automate fitness functions to guard the chosen architecture" + ], + "correctAnswer": [ + "Capture the most important constraints and quality attributes", + "Compare candidate patterns against the required quality attributes", + "Record the decision and trade-offs in an ADR", + "Automate fitness functions to guard the chosen architecture", + "Revisit the decision when production metrics or constraints change" + ], + "explanation": "A mature governance loop starts with constraints, evaluates options, records the decision, enforces it with fitness functions, and revisits it when reality changes.", + "tags": [ + "governance", + "adrs", + "fitness-functions", + "quality-attributes" + ], + "references": [ + "Design Principles", + "Introduction" + ] + }, + { + "id": "systemdesign-q15", + "type": "matching", + "difficulty": "expert", + "scenario": "A staff engineer is mapping desired qualities to the technique most likely to produce them.", + "prompt": "Match each goal to the most appropriate principle or mechanism.", + "premises": [ + "A billing rule change should not require modifying unrelated notification code", + "A flaky recommendation dependency should not consume all checkout threads", + "Popular static assets should be served from infrastructure close to users", + "Adding a new payment provider should avoid editing stable calling code" + ], + "responses": [ + "Single 
"explanation": "The Single Responsibility principle reduces unrelated reasons to change, circuit breakers protect resources from unhealthy dependencies, CDNs move content closer to users, and Open-Closed supports extension without modifying stable core code.",
Global locking and a shared schema undermine service autonomy and scale poorly.", + "tags": [ + "distributed-transactions", + "idempotency", + "saga-thinking", + "eventual-consistency" + ], + "references": [ + "Distributed Systems", + "Architecture Patterns", + "Design Principles" + ] + }, + { + "id": "systemdesign-q17", + "type": "single-choice", + "difficulty": "expert", + "scenario": "In a monolith, thumbnail generation is CPU-bound and spikes during uploads, but checkout and search traffic remain steady. Scaling the whole application is becoming wasteful.", + "prompt": "Which change most directly improves resource efficiency?", + "options": [ + "Extract thumbnail processing into a separate service or async worker that scales independently", + "Add more CPU to the existing database server", + "Use stronger consistency across all requests", + "Replace architecture diagrams with code diagrams" + ], + "correctAnswer": 0, + "explanation": "Separating the hot, CPU-intensive workload allows targeted scaling and avoids overprovisioning unrelated parts of the system. The other options do not address the actual bottleneck.", + "tags": [ + "independent-scaling", + "microservices", + "async-workers", + "resource-efficiency" + ], + "references": [ + "Architecture Patterns", + "Scaling Strategies" + ] + }, + { + "id": "systemdesign-q18", + "type": "true-false", + "difficulty": "expert", + "scenario": "A data-import endpoint publishes jobs to a queue and returns immediately with an accepted status while workers process the backlog.", + "prompt": "True or false: this design can improve user-facing resilience during spikes even though the underlying work completes asynchronously.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "Queues decouple request ingestion from expensive processing, smoothing spikes and protecting interactive request paths. 
The user-visible operation can remain responsive even if the full workflow finishes later.", + "tags": [ + "message-queues", + "async-processing", + "backpressure", + "resilience" + ], + "references": [ + "Scaling Strategies", + "Architecture Patterns" + ] + }, + { + "id": "systemdesign-q19", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A collaboration app keeps tens of thousands of long-lived websocket connections. Some nodes end up much busier than others because connection durations vary widely.", + "prompt": "Which balancing algorithm is the strongest default starting point?", + "options": [ + "Least connections", + "Round robin", + "IP hash", + "Weighted random without health checks" + ], + "correctAnswer": 0, + "explanation": "Least connections is a strong default for workloads with long-lived or uneven session lengths because it directs new work toward nodes with fewer active connections rather than blindly rotating.", + "tags": [ + "load-balancing", + "websockets", + "least-connections", + "traffic-distribution" + ], + "references": [ + "Scaling Strategies" + ] + }, + { + "id": "systemdesign-q20", + "type": "single-choice", + "difficulty": "expert", + "scenario": "During an inter-datacenter partition, a payment authorization service chooses a CP posture rather than AP because duplicate or contradictory authorizations are unacceptable.", + "prompt": "What is the most likely effect on some client requests during the partition?", + "options": [ + "They may time out or be rejected to preserve consistency", + "They will all succeed with the latest state everywhere", + "They will be transparently routed to stale replicas with no trade-off", + "The partition becomes irrelevant because the database is relational" + ], + "correctAnswer": 0, + "explanation": "If the system chooses consistency under partition, some operations may need to fail or wait rather than risk contradictory state. 
That is the practical cost of a CP choice.", + "tags": [ + "cp-systems", + "cap-theorem", + "payments", + "availability-trade-off" + ], + "references": [ + "Distributed Systems", + "Case Studies" + ] + }, + { + "id": "systemdesign-q21", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A platform team is deciding whether a mature monolith should be decomposed. They want evidence, not fashion.", + "prompt": "Which signals support moving toward microservices now?", + "options": [ + "Multiple teams are blocked by a shared release train", + "Different domains need independent scaling characteristics", + "The entire product is still simple and maintained by four engineers", + "Specific workloads would benefit from different technology stacks or runtime profiles", + "The organization still lacks observability and operational ownership, but wants microservices for resume value" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Independent team velocity, divergent scaling needs, and meaningful workload specialization are legitimate drivers for microservices. 
A tiny, simple system and weak operational maturity argue against decomposition.", + "tags": [ + "microservices-migration", + "team-topology", + "scaling", + "trade-offs" + ], + "references": [ + "Architecture Patterns", + "Design Principles", + "Case Studies" + ] + }, + { + "id": "systemdesign-q22", + "type": "single-choice", + "difficulty": "expert", + "scenario": "An architect wants one diagram that shows the web app, mobile backend, auth service, database, cache, and message queue inside the system boundary, but not the internal classes of those services.", + "prompt": "Which C4 diagram is the best fit?", + "options": [ + "Container diagram", + "System Context diagram", + "Component diagram", + "Code diagram" + ], + "correctAnswer": 0, + "explanation": "A Container diagram is the right level when you need the main deployable pieces and data stores inside the system, without dropping into class-level or implementation detail.", + "tags": [ + "container-diagram", + "c4-model", + "visualization", + "system-boundary" + ], + "references": [ + "Visualization" + ] + }, + { + "id": "systemdesign-q23", + "type": "matching", + "difficulty": "expert", + "scenario": "A principal engineer is choosing a default pattern for several new initiatives.", + "prompt": "Match each project to the architecture that best fits its dominant constraints.", + "premises": [ + "A five-person team needs an MVP in eight weeks with one shared transactional database", + "An upload should trigger transcoding, moderation, analytics, and notifications independently", + "A webhook processor sees spiky traffic and runs short stateless handlers", + "A global platform has many teams, domain boundaries, and a need for independent deployments" + ], + "responses": [ + "Monolithic architecture", + "Event-driven architecture", + "Serverless architecture", + "Microservices architecture" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "The right architecture depends on constraints. 
Small, fast-moving, transactional MVPs fit monoliths; multi-consumer workflows fit event-driven systems; spiky stateless handlers fit serverless; and large multi-team platforms often justify microservices.", + "tags": [ + "architecture-selection", + "monolith", + "event-driven", + "serverless", + "microservices" + ], + "references": [ + "Architecture Patterns", + "Case Studies" + ] + }, + { + "id": "systemdesign-q24", + "type": "true-false", + "difficulty": "expert", + "scenario": "A team claims architecture governance is complete once diagrams are approved and stored in a wiki.", + "prompt": "True or false: fitness functions are intended to continuously verify architectural characteristics, not just document them once.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "Fitness functions translate architectural intent into repeatable automated checks. They exist precisely because static documentation alone does not keep real systems aligned with their intended qualities.", + "tags": [ + "fitness-functions", + "governance", + "automation", + "architecture-drift" + ], + "references": [ + "Design Principles", + "Visualization" + ] + }, + { + "id": "systemdesign-q25", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A streaming company knows that the same few episodes will dominate traffic in a region right after release. 
The goal is to reduce startup latency and shield the origin from repeated identical requests.", + "prompt": "Which tactic will produce the biggest benefit first?", + "options": [ + "Pre-position hot content in edge caches close to viewers", + "Increase write consistency on the user-profile database", + "Replace the CDN with a single larger origin cluster", + "Move all processing into one monolith" + ], + "correctAnswer": 0, + "explanation": "When demand is highly repetitive and geographically concentrated, serving hot content from edge caches is the fastest way to reduce startup time and origin load.", + "tags": [ + "case-study", + "cdn", + "predictive-caching", + "latency" + ], + "references": [ + "Case Studies", + "Scaling Strategies" + ] + }, + { + "id": "systemdesign-q26", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A recommendation service calls profile, catalog, and ranking dependencies. When catalog slows down, latency spikes propagate and unrelated requests begin to fail.", + "prompt": "Which measures reduce blast radius in this kind of distributed call graph?", + "options": [ + "Use timeouts and circuit breakers around downstream calls", + "Isolate critical resources with bulkheads", + "Retry immediately and indefinitely from every layer", + "Offload non-critical work to async queues instead of holding synchronous request threads", + "Wrap every remote call in a synchronous distributed transaction" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Circuit breakers, timeouts, and bulkheads are classic containment tools, and async offloading prevents synchronous paths from absorbing avoidable work. 
Infinite layered retries and distributed transactions usually amplify failure instead of containing it.", + "tags": [ + "circuit-breaker", + "bulkhead", + "async-processing", + "failure-isolation" + ], + "references": [ + "Distributed Systems", + "Design Principles", + "Case Studies" + ] + }, + { + "id": "systemdesign-q27", + "type": "single-choice", + "difficulty": "expert", + "scenario": "Six months after a controversial migration, a new tech lead needs to understand why the team accepted operational complexity to gain deployment independence.", + "prompt": "Which artifact is most likely to preserve that reasoning?", + "options": [ + "Architecture Decision Record (ADR)", + "A rotated on-call schedule", + "A CDN cache report", + "A load balancer health dashboard" + ], + "correctAnswer": 0, + "explanation": "An ADR exists to capture context, decision, consequences, and trade-offs so later engineers can understand why a path was chosen. Operational artifacts show runtime facts, not architectural reasoning.", + "tags": [ + "adr", + "decision-history", + "governance", + "trade-offs" + ], + "references": [ + "Design Principles" + ] + }, + { + "id": "systemdesign-q28", + "type": "ordering", + "difficulty": "expert", + "scenario": "You are tracing a cacheable read request through a layered delivery stack.", + "prompt": "Arrange the lookup layers from closest to the user to deepest in the backend path.", + "items": [ + "Application cache", + "Primary database", + "Browser cache", + "CDN edge cache" + ], + "correctAnswer": [ + "Browser cache", + "CDN edge cache", + "Application cache", + "Primary database" + ], + "explanation": "A well-layered read path checks the cheapest and closest caches first, then progressively falls back to deeper system layers. 
That ordering minimizes both latency and backend load.", + "tags": [ + "cache-hierarchy", + "browser-cache", + "cdn", + "application-cache" + ], + "references": [ + "Scaling Strategies", + "Case Studies" + ] + }, + { + "id": "systemdesign-q29", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A service called BillingManager calculates invoices, sends reminder emails, formats PDFs, and writes audit logs. Small policy changes now force risky edits across unrelated behavior.", + "prompt": "Which design principle is being violated most directly?", + "options": [ + "Single Responsibility principle", + "CAP theorem", + "Least connections", + "CQRS" + ], + "correctAnswer": 0, + "explanation": "The service clearly has multiple unrelated reasons to change. That is the essence of violating Single Responsibility, which in turn makes testing and maintenance harder.", + "tags": [ + "single-responsibility", + "solid", + "maintainability", + "coupling" + ], + "references": [ + "Design Principles" + ] + }, + { + "id": "systemdesign-q30", + "type": "true-false", + "difficulty": "expert", + "scenario": "A non-technical stakeholder wants to understand system boundaries and external dependencies before funding a redesign.", + "prompt": "True or false: a code-level diagram is the best first artifact for that audience.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "Code-level diagrams are far too detailed for that audience. 
Stakeholders evaluating boundaries and external relationships should start with higher-level visualizations such as a System Context view.", + "tags": [ + "stakeholder-communication", + "c4-model", + "code-diagram", + "abstraction" + ], + "references": [ + "Visualization", + "Introduction" + ] + }, + { + "id": "systemdesign-q31", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A global subscription platform can tolerate slightly stale recommendation results, but billing balances and renewal state must never drift across concurrent updates.", + "prompt": "Which overall design approach is the most defensible?", + "options": [ + "Use strong consistency or ACID-style guarantees for billing, while allowing eventual consistency in less critical read-heavy domains", + "Force every subsystem into the same eventual-consistency model for simplicity", + "Route all traffic through one server to avoid trade-offs", + "Use CDN caching as the primary correctness mechanism for payments" + ], + "correctAnswer": 0, + "explanation": "Different domains often deserve different consistency models. 
Critical financial state should favor correctness guarantees, while less critical read-heavy features can trade some freshness for scale and availability.", + "tags": [ + "consistency-models", + "bounded-contexts", + "acid-vs-base", + "domain-trade-offs" + ], + "references": [ + "Distributed Systems", + "Design Principles", + "Case Studies" + ] + } + ] +} \ No newline at end of file diff --git a/quiz-banks/typescript.quiz.json b/quiz-banks/typescript.quiz.json new file mode 100644 index 0000000..f5add67 --- /dev/null +++ b/quiz-banks/typescript.quiz.json @@ -0,0 +1,876 @@ +{ + "moduleId": "typescript", + "moduleTitle": "TypeScript", + "description": "Scenario-based assessment focused on advanced TypeScript type-system reasoning, OOP modeling, migration strategy, and compile-time versus runtime tradeoffs.", + "version": 1, + "timeLimitMinutes": 10, + "questionsPerAttempt": 5, + "questions": [ + { + "id": "typescript-q01", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A reducer handles a discriminated union of commands. After a new command variant was added, one code path silently fell through to a generic fallback and production metrics were wrong.", + "prompt": "Which change most directly turns the missing case into a compile-time error the next time the union grows?", + "options": [ + "Replace the fallback with an assertNever-style check on the remaining value", + "Cast the fallback value to any before returning", + "Move the switch into a try/catch", + "Add a default branch that returns undefined" + ], + "correctAnswer": 0, + "explanation": "A never-based exhaustiveness check forces the compiler to prove that no union members remain. 
If a new variant is added and not handled, the remaining value is no longer never and the code stops compiling.", + "tags": [ + "discriminated-unions", + "exhaustiveness", + "never", + "reducers" + ], + "references": [ + "Advanced Types", + "Best Practices" + ] + }, + { + "id": "typescript-q02", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A team is migrating a dynamic payments module from JavaScript to TypeScript under a strict configuration, and they want the migration to expose genuine defects rather than hide them behind assertions.", + "prompt": "Which actions support that goal?", + "options": [ + "Enable noImplicitAny so untyped parameters and variables are surfaced", + "Treat external payloads as unknown and validate them before use", + "Add broad as any casts around failing branches to keep velocity high", + "Enable strictNullChecks so absent values cannot flow unchecked", + "Model dynamic dictionaries as Record wherever shapes are unclear" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Strict compiler settings and unknown at trust boundaries reveal unsound assumptions. Broad any and assertion-heavy modeling suppress the very defects the migration should uncover.", + "tags": [ + "migration", + "strict-mode", + "unknown", + "null-safety" + ], + "references": [ + "Migration Guide", + "Best Practices", + "Type Guards" + ] + }, + { + "id": "typescript-q03", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A shared config object is declared with readonly properties in TypeScript, then passed to older JavaScript utilities that mutate objects in place.", + "prompt": "True or false: the readonly modifier alone prevents those runtime mutations after compilation.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "readonly is a compile-time restriction. 
TypeScript does not freeze the emitted JavaScript object, so plain JavaScript code can still mutate it unless runtime immutability is added separately.", + "tags": [ + "readonly", + "runtime-vs-compile-time", + "immutability" + ], + "references": [ + "TypeScript vs JavaScript", + "Best Practices" + ] + }, + { + "id": "typescript-q04", + "type": "ordering", + "difficulty": "expert", + "scenario": "A webhook handler receives untrusted JSON from several partners. You need a flow that preserves type safety all the way to variant-specific logic.", + "prompt": "Arrange the safest TypeScript workflow from earliest to latest.", + "items": [ + "Receive the parsed payload as unknown", + "Check that the value is an object with the expected outer shape", + "Narrow on a discriminant or other validated field", + "Access variant-specific properties only inside the narrowed branch", + "Use a never-based exhaustiveness check for remaining variants" + ], + "correctAnswer": [ + "Receive the parsed payload as unknown", + "Check that the value is an object with the expected outer shape", + "Narrow on a discriminant or other validated field", + "Access variant-specific properties only inside the narrowed branch", + "Use a never-based exhaustiveness check for remaining variants" + ], + "explanation": "The safe sequence is to begin with unknown, validate structure, narrow by evidence, access specific fields only after narrowing, and finish with exhaustiveness checking so future variants are not silently missed.", + "tags": [ + "unknown", + "narrowing", + "webhooks", + "control-flow" + ], + "references": [ + "Type Guards", + "Advanced Types", + "TypeScript Visualization" + ] + }, + { + "id": "typescript-q05", + "type": "matching", + "difficulty": "expert", + "scenario": "During a type-system review, four language features are being matched to the problem each one solves best.", + "prompt": "Match each feature to its primary purpose.", + "premises": [ + "never", + "unknown", + "satisfies", 
+ "as const" + ], + "responses": [ + "Represents values that should be impossible after exhaustive narrowing", + "Represents an input that must be checked before use", + "Verifies compatibility without widening away the expression's own literal information", + "Preserves literal types and readonly-ness for a value expression" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "never is for impossible states, unknown is the safe top type for unchecked data, satisfies validates shape while preserving the original expression type, and as const preserves literal precision on values.", + "tags": [ + "never", + "unknown", + "satisfies", + "const-assertions" + ], + "references": [ + "Advanced TypeScript", + "Advanced Types", + "Best Practices" + ] + }, + { + "id": "typescript-q06", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A library author stores an API token on class instances and wants a guarantee that consumers cannot reach it through normal property access, structural typing, or subclass field collisions.", + "prompt": "Which feature provides the strongest runtime-backed encapsulation?", + "options": [ + "A JavaScript private field declared as #token", + "A TypeScript private member declared as token: string", + "A protected member declared as token: string", + "A readonly member declared as token: string" + ], + "correctAnswer": 0, + "explanation": "JavaScript private fields use runtime-enforced encapsulation. 
TypeScript private is checked only by the type system, while protected and readonly solve different problems.", + "tags": [ + "private-fields", + "encapsulation", + "classes", + "runtime" + ], + "references": [ + "Encapsulation", + "Classes & Objects", + "TypeScript vs JavaScript" + ] + }, + { + "id": "typescript-q07", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "An analytics pipeline filters an array of union results to keep only successes, but the next map call still sees the element type as the full union.", + "prompt": "Which helper signature most directly teaches the compiler that the filter narrows the array?", + "options": [ + "function isSuccess(value: Result): value is SuccessResult", + "function isSuccess(value: Result): boolean", + "function isSuccess(value: T): T", + "function isSuccess(value: any): asserts value" + ], + "correctAnswer": 0, + "explanation": "A user-defined type predicate tells TypeScript exactly which subtype remains after the predicate returns true. A plain boolean return does not carry narrowing information across array methods.", + "tags": [ + "type-predicates", + "filter", + "unions", + "narrowing" + ], + "references": [ + "Type Guards", + "Advanced TypeScript" + ] + }, + { + "id": "typescript-q08", + "type": "multi-select", + "difficulty": "expert", + "scenario": "You are designing a generic configuration registry that loads unknown JSON, offers typed property access, and may miss lookups at runtime.", + "prompt": "Which API design choices improve type safety without breaking the relationship between inputs and outputs?", + "options": [ + "Constrain the stored type parameter, for example T extends BaseConfig", + "Use a key parameter K extends keyof T and return T[K] from property readers", + "Cast JSON.parse(...) 
directly to T at the trust boundary with no validation", + "Return T | undefined for lookups that may not find an entry", + "Default unresolved type parameters to any so callers rarely see errors" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Constraints, keyed generics, and honest undefined modeling preserve type relationships safely. Blind assertions and defaulting to any erase useful guarantees instead of strengthening them.", + "tags": [ + "generics", + "keyof", + "indexed-access", + "api-design" + ], + "references": [ + "Generics", + "Type Guards", + "Best Practices" + ] + }, + { + "id": "typescript-q09", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A teaching framework needs to wrap class methods for tracing so students can observe call timing without changing every call site.", + "prompt": "Which TypeScript feature is intended for attaching runtime behavior like that to a class member?", + "options": [ + "A method decorator", + "A conditional type", + "Declaration merging", + "A namespace import alias" + ], + "correctAnswer": 0, + "explanation": "Method decorators are designed for applying runtime behavior or metadata to class members. Conditional types and declaration merging are type-system or declaration-structure features, not runtime wrappers.", + "tags": [ + "decorators", + "methods", + "instrumentation", + "classes" + ], + "references": [ + "Decorators", + "Advanced TypeScript" + ] + }, + { + "id": "typescript-q10", + "type": "true-false", + "difficulty": "expert", + "scenario": "A developer tells the team that once a class implements an interface, they can later check that interface with instanceof in production code.", + "prompt": "True or false: implemented interfaces survive in emitted JavaScript as runtime values that instanceof can inspect.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "Interfaces are erased during compilation and do not exist at runtime. 
instanceof works with runtime constructor functions or classes, not erased TypeScript interfaces.", + "tags": [ + "interfaces", + "runtime-vs-compile-time", + "instanceof", + "oop" + ], + "references": [ + "TypeScript vs JavaScript", + "OOP Fundamentals" + ] + }, + { + "id": "typescript-q11", + "type": "ordering", + "difficulty": "expert", + "scenario": "A teammate is confused about why a generic pluck helper returns different types for different keys without using overloads.", + "prompt": "Arrange the type-resolution steps from earliest to latest.", + "items": [ + "Infer the object type T from the object argument", + "Infer the specific key type K from the key argument", + "Check that K is assignable to keyof T", + "Compute the indexed access result type T[K]", + "Return a value typed with that computed property type" + ], + "correctAnswer": [ + "Infer the object type T from the object argument", + "Infer the specific key type K from the key argument", + "Check that K is assignable to keyof T", + "Compute the indexed access result type T[K]", + "Return a value typed with that computed property type" + ], + "explanation": "The compiler first infers the participating generic types from the arguments, verifies the key constraint, computes the property lookup type T[K], and then uses that result as the function's return type.", + "tags": [ + "generics", + "inference", + "keyof", + "indexed-access" + ], + "references": [ + "Generics", + "Advanced TypeScript", + "TypeScript Visualization" + ] + }, + { + "id": "typescript-q12", + "type": "matching", + "difficulty": "expert", + "scenario": "A reviewer wants to map several advanced utility types to the transformations they perform on a domain model.", + "prompt": "Match each utility type to its effect.", + "premises": [ + "Exclude", + "Extract", + "NonNullable", + "Record" + ], + "responses": [ + "Removes from T the members assignable to U", + "Keeps from T only the members assignable to U", + "Removes null and undefined 
from a type", + "Builds an object type from a key union and a value type" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Exclude subtracts assignable members, Extract keeps them, NonNullable removes nullish values, and Record constructs an object type whose keys come from a union and whose values share one type.", + "tags": [ + "utility-types", + "exclude", + "extract", + "record" + ], + "references": [ + "Advanced Types", + "Advanced TypeScript" + ] + }, + { + "id": "typescript-q13", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A fluent query builder has a base class method select(), but after calling it on a subclass instance the chain loses subclass-specific methods.", + "prompt": "Which return type on the base-class fluent methods preserves polymorphic chaining for subclasses?", + "options": [ + "this", + "BaseBuilder", + "unknown", + "never" + ], + "correctAnswer": 0, + "explanation": "The special this type is polymorphic over subclasses. It allows a base method to return the most specific current instance type so subclass-only methods remain available in the chain.", + "tags": [ + "polymorphic-this", + "fluent-apis", + "classes", + "polymorphism" + ], + "references": [ + "Polymorphism", + "Classes & Objects", + "Advanced TypeScript" + ] + }, + { + "id": "typescript-q14", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A generic serializer uses conditional types over unions and the resulting types are unexpectedly distributed member by member.", + "prompt": "Which statements about distributive conditional types are correct?", + "options": [ + "Wrapping both sides in tuples, as in [T] extends [U], disables distribution", + "A conditional type with a naked type parameter can distribute over union members", + "Using infer inside a conditional type is forbidden when unions are involved", + "T extends unknown ? 
X : Y will distribute when T is a union", + "Adding an extends constraint to T automatically disables distribution" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Distribution happens when a naked type parameter is checked against a condition. Tuple-wrapping suppresses that behavior, while infer remains valid and constraints alone do not turn distribution off.", + "tags": [ + "conditional-types", + "distribution", + "unions", + "advanced-generics" + ], + "references": [ + "Generics", + "Advanced Types" + ] + }, + { + "id": "typescript-q15", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A route table object should be checked against an interface so missing keys are reported, but each path string must remain a literal type for downstream inference.", + "prompt": "Which operator best fits that requirement?", + "options": [ + "satisfies", + "as", + "keyof", + "typeof" + ], + "correctAnswer": 0, + "explanation": "satisfies verifies that the value conforms to the target shape while preserving the expression's own inferred type. A cast with as can hide errors and usually loses the precision you want to keep.", + "tags": [ + "satisfies", + "literal-types", + "configuration", + "inference" + ], + "references": [ + "Advanced TypeScript", + "Best Practices" + ] + }, + { + "id": "typescript-q16", + "type": "single-choice", + "difficulty": "expert", + "scenario": "You are wrapping JSON.parse for a security-sensitive ingestion pipeline and want every consumer to prove safety before touching fields.", + "prompt": "What should the wrapper return instead of any?", + "options": [ + "unknown", + "never", + "object", + "{}" + ], + "correctAnswer": 0, + "explanation": "unknown forces downstream code to validate or narrow before property access. 
any would silently permit unsafe operations and undermine the point of the wrapper.", + "tags": [ + "unknown", + "trust-boundaries", + "json", + "validation" + ], + "references": [ + "Type Guards", + "Best Practices", + "Migration Guide" + ] + }, + { + "id": "typescript-q17", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A checkout platform supports multiple payment providers. The team wants swappable implementations, shared workflow steps, and compiler help when subclasses drift from the base contract.", + "prompt": "Which design choices best support that architecture?", + "options": [ + "Create an abstract PaymentProvider base class for shared lifecycle logic", + "Expose credentials on public mutable fields so tests can patch them directly", + "Define an interface for the capabilities consumed by the checkout service", + "Mark subclass methods with override where they replace base behavior", + "Let callers update retry counters through public state on each provider" + ], + "correctAnswer": [ + 0, + 2, + 3 + ], + "explanation": "An abstract base class shares implementation, an interface supports substitution at the call site, and override helps catch accidental drift. 
Public mutable internals weaken encapsulation and invite invalid states.", + "tags": [ + "abstraction", + "inheritance", + "interfaces", + "override" + ], + "references": [ + "Abstraction", + "Inheritance", + "Best Practices" + ] + }, + { + "id": "typescript-q18", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A subclass method was meant to replace a base implementation, but a small signature mismatch created a new method instead and the bug survived review.", + "prompt": "Which TypeScript feature most directly catches that mistake at the subclass declaration?", + "options": [ + "The override modifier with noImplicitOverride", + "A readonly modifier", + "An enum discriminator", + "A namespace export" + ], + "correctAnswer": 0, + "explanation": "override tells the compiler a member is intended to replace a base-class member. With noImplicitOverride enabled, TypeScript catches accidental non-overrides and missing override keywords.", + "tags": [ + "override", + "inheritance", + "compiler-options", + "safety" + ], + "references": [ + "Inheritance", + "Best Practices" + ] + }, + { + "id": "typescript-q19", + "type": "true-false", + "difficulty": "advanced", + "scenario": "Two teams compare builds and notice that one version added several interfaces and type aliases but kept the executable logic identical.", + "prompt": "True or false: if the programs differ only in erased type constructs, the emitted JavaScript can still be identical.", + "options": [ + "True", + "False" + ], + "correctAnswer": 0, + "explanation": "Most TypeScript-specific type constructs are erased during compilation. 
If only those erased constructs change, the runtime JavaScript output can remain exactly the same.", + "tags": [ + "type-erasure", + "emit", + "interfaces", + "compile-time" + ], + "references": [ + "TypeScript vs JavaScript", + "Introduction" + ] + }, + { + "id": "typescript-q20", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A persistence layer should convert a partially edited draft type into one where every property is required before a database write.", + "prompt": "Which mapped type form removes optional modifiers from every property in T?", + "options": [ + "{ [K in keyof T]-?: T[K] }", + "{ [K in keyof T]+?: T[K] }", + "{ readonly [K in keyof T]: T[K] }", + "{ [K in keyof T]: T | undefined }" + ], + "correctAnswer": 0, + "explanation": "The -? mapped modifier removes optionality from each property. The other forms either add optionality, add readonly, or change the property type altogether.", + "tags": [ + "mapped-types", + "optionality", + "advanced-types", + "drafts" + ], + "references": [ + "Advanced Types" + ] + }, + { + "id": "typescript-q21", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A visualization of control-flow narrowing shows that a union payload is still too wide in nested branches, and the team wants patterns that give the compiler more evidence.", + "prompt": "Which techniques improve narrowing in that situation?", + "options": [ + "Add a stable discriminant property with distinct literal values on each variant", + "Use a user-defined predicate returning value is SpecificVariant", + "Replace unknown with any at the boundary so the compiler stops resisting", + "Use checks like 'kind' in value or equality tests on the discriminant before property access", + "Cast each branch with as SpecificVariant even when no runtime evidence exists" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "Control-flow analysis relies on evidence such as discriminants, in checks, equality checks, and 
user-defined predicates. any and assertion-heavy code remove evidence instead of improving narrowing.", + "tags": [ + "type-guards", + "control-flow-analysis", + "discriminated-unions", + "narrowing" + ], + "references": [ + "Type Guards", + "Advanced Types", + "TypeScript Visualization" + ] + }, + { + "id": "typescript-q22", + "type": "single-choice", + "difficulty": "advanced", + "scenario": "A method decorator adds runtime logging around a service method, and a teammate expects that decoration alone to tighten the method's static call signature throughout the codebase.", + "prompt": "Which statement is most accurate?", + "options": [ + "Decorators can wrap runtime behavior, but they do not automatically teach the type system a new call contract", + "Decorators are erased before runtime and only affect static types", + "Decorators guarantee exhaustive checking for union parameters", + "Decorators convert private members into public ones" + ], + "correctAnswer": 0, + "explanation": "Decorators operate in the runtime layer. They can alter behavior or attach metadata, but static typing still depends on the declared types unless you explicitly model changes in the type system.", + "tags": [ + "decorators", + "runtime", + "static-types", + "methods" + ], + "references": [ + "Decorators", + "TypeScript vs JavaScript", + "Best Practices" + ] + }, + { + "id": "typescript-q23", + "type": "single-choice", + "difficulty": "expert", + "scenario": "An event bus should only allow channel names composed from a domain union and an action union, such as user:created or billing:failed.", + "prompt": "Which TypeScript feature models that string space most directly?", + "options": [ + "Template literal types", + "Namespaces", + "Decorators", + "Module augmentation" + ], + "correctAnswer": 0, + "explanation": "Template literal types let you construct new string literal unions from other unions. 
That makes them the natural fit for constrained channel naming schemes.", + "tags": [ + "template-literal-types", + "string-unions", + "event-bus", + "advanced-types" + ], + "references": [ + "Advanced Types", + "Advanced TypeScript" + ] + }, + { + "id": "typescript-q24", + "type": "matching", + "difficulty": "expert", + "scenario": "During an architecture review, the team is pairing object-oriented principles with concrete TypeScript design decisions.", + "prompt": "Match each principle to the best scenario.", + "premises": [ + "Encapsulation", + "Abstraction", + "Polymorphism", + "Inheritance" + ], + "responses": [ + "Hide internal state behind methods and restricted member visibility", + "Expose a stable contract while leaving implementation details to derived classes or implementations", + "Use a base-typed reference to call different concrete implementations through the same API", + "Create a subclass that reuses and extends behavior from a parent class" + ], + "correctAnswer": [ + 0, + 1, + 2, + 3 + ], + "explanation": "Encapsulation controls access to state, abstraction focuses on the contract, polymorphism enables one interface with multiple concrete behaviors, and inheritance reuses and extends parent behavior.", + "tags": [ + "oop", + "encapsulation", + "abstraction", + "polymorphism", + "inheritance" + ], + "references": [ + "OOP Fundamentals", + "Encapsulation", + "Abstraction", + "Polymorphism", + "Inheritance" + ] + }, + { + "id": "typescript-q25", + "type": "single-choice", + "difficulty": "expert", + "scenario": "You are writing a helper that reads a property from an object and want the return type to stay linked to the specific key passed by the caller.", + "prompt": "Which signature preserves that relationship most precisely?", + "options": [ + "function pluck(obj: T, key: K): T[K]", + "function pluck(obj: T, key: string): unknown", + "function pluck(obj: object, key: PropertyKey): any", + "function pluck(obj: T, key: keyof T): T" + ], + 
"correctAnswer": 0, + "explanation": "The combination of K extends keyof T and the indexed access return type T[K] preserves the exact relationship between the chosen key and the resulting property type.", + "tags": [ + "generics", + "keyof", + "indexed-access", + "api-design" + ], + "references": [ + "Generics", + "Advanced TypeScript" + ] + }, + { + "id": "typescript-q26", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A type-level utility must extract nested pieces from promises and callback signatures without changing runtime code.", + "prompt": "Which statements about infer are correct?", + "options": [ + "Promise can capture the resolved value type inside a conditional type", + "infer introduces a type variable that exists only within the conditional type branch", + "infer cannot be used with tuple or function-pattern matching", + "Nested conditional types can use infer to extract pieces such as callback arguments or return types", + "Using infer changes the emitted JavaScript for the generic function" + ], + "correctAnswer": [ + 0, + 1, + 3 + ], + "explanation": "infer is a type-level pattern-matching tool within conditional types. It can extract pieces from promises, tuples, functions, and other structured types, but it does not alter emitted JavaScript.", + "tags": [ + "infer", + "conditional-types", + "promises", + "callbacks" + ], + "references": [ + "Advanced Types", + "Generics" + ] + }, + { + "id": "typescript-q27", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A JavaScript application consumes your compiled library. 
The team wants editors and downstream builds to see the public TypeScript shapes even though interfaces are erased at runtime.", + "prompt": "Which artifact should you ship for that tooling experience?", + "options": [ + ".d.ts declaration files", + "Source maps", + "tsbuildinfo files", + "Decorator metadata" + ], + "correctAnswer": 0, + "explanation": "Declaration files preserve the public type surface for consumers and tooling. Source maps support debugging, tsbuildinfo supports incremental compilation, and decorator metadata is unrelated to general API typing.", + "tags": [ + "declarations", + "library-authoring", + "tooling", + "emit" + ], + "references": [ + "TypeScript vs JavaScript", + "Migration Guide" + ] + }, + { + "id": "typescript-q28", + "type": "true-false", + "difficulty": "advanced", + "scenario": "A teammate applies as const to a nested settings object and assumes no one can mutate the actual object at runtime.", + "prompt": "True or false: as const freezes the object in emitted JavaScript.", + "options": [ + "True", + "False" + ], + "correctAnswer": 1, + "explanation": "as const narrows the type of the expression and marks properties readonly in the type system, but it does not call Object.freeze or otherwise enforce runtime immutability.", + "tags": [ + "const-assertions", + "immutability", + "runtime-vs-compile-time" + ], + "references": [ + "Advanced Types", + "Best Practices", + "TypeScript vs JavaScript" + ] + }, + { + "id": "typescript-q29", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A base class has a member that subclasses should use, but external callers must not access it directly.", + "prompt": "Which visibility modifier best communicates that requirement?", + "options": [ + "protected", + "public", + "private", + "readonly" + ], + "correctAnswer": 0, + "explanation": "protected exposes a member to the declaring class and its subclasses while hiding it from outside callers. 
private would also hide it from subclasses, and readonly is unrelated to visibility.", + "tags": [ + "protected", + "visibility", + "inheritance", + "encapsulation" + ], + "references": [ + "Encapsulation", + "Inheritance" + ] + }, + { + "id": "typescript-q30", + "type": "single-choice", + "difficulty": "expert", + "scenario": "A notification system needs interchangeable implementations, but all notifiers should inherit common retry and telemetry logic from one shared place.", + "prompt": "Which design is the best fit?", + "options": [ + "An abstract base class that can share implementation while defining required operations", + "A plain interface, because interfaces can provide shared method bodies", + "A union type of concrete notifier classes with no shared parent", + "A namespace containing helper functions and global mutable state" + ], + "correctAnswer": 0, + "explanation": "An abstract base class is the right choice when you need both a common contract and shared implementation. Interfaces define shape but cannot carry the reusable method bodies described in the scenario.", + "tags": [ + "abstraction", + "abstract-classes", + "shared-implementation", + "oop" + ], + "references": [ + "Abstraction", + "OOP Fundamentals", + "Classes & Objects" + ] + }, + { + "id": "typescript-q31", + "type": "multi-select", + "difficulty": "expert", + "scenario": "A team is preparing a long-lived TypeScript codebase for scale and wants review rules that reduce unsoundness instead of merely silencing diagnostics.", + "prompt": "Which practices align with that goal?", + "options": [ + "Prefer unknown over any at trust boundaries", + "Model impossible combinations with discriminated unions instead of loosely related flags", + "Use broad type assertions to bypass edge cases during review", + "Enable strict compiler options and patterns such as override and exhaustiveness checks", + "Use satisfies when validating config objects so literal information is preserved" + ], + 
"correctAnswer": [ + 0, + 1, + 3, + 4 + ], + "explanation": "These practices preserve useful compiler feedback and make invalid states harder to represent. Broad assertions do the opposite by discarding evidence and hiding real problems.", + "tags": [ + "best-practices", + "strictness", + "discriminated-unions", + "satisfies", + "unknown" + ], + "references": [ + "Best Practices", + "Migration Guide", + "Advanced Types" + ] + } + ] +} \ No newline at end of file diff --git a/src/components/Sidebar.tsx b/src/components/Sidebar.tsx index bfd7c47..d81c709 100644 --- a/src/components/Sidebar.tsx +++ b/src/components/Sidebar.tsx @@ -3,6 +3,10 @@ import { Link, useLocation } from 'react-router-dom'; import { ChevronRight, ChevronDown } from 'lucide-react'; import { getSectionTheme } from '../utils/theme'; import { useUI } from '../shared/contexts'; +import { + getModuleIdFromPathname, + getSectionQueryValue, +} from '../shared/constants/moduleNavigation'; // Helper function to get theme color classes const getThemeColorClass = ( @@ -390,57 +394,30 @@ const Sidebar: React.FC = () => { const touchStartX = useRef(0); const touchCurrentX = useRef(0); - // Determine section from pathname - const getSectionFromPath = ( - path: string - ): - | 'javascript' - | 'git' - | 'datastructures' - | 'rxjs' - | 'react' - | 'nextjs' - | 'bigo' - | 'python' - | 'systemdesign' - | 'typescript' - | 'ai' - | 'nodejs' - | 'devops' - | 'auth' => { - if (path.includes('/devops')) return 'devops'; - if (path.includes('/auth')) return 'auth'; - if (path.includes('/nodejs')) return 'nodejs'; - if (path.includes('/ai')) return 'ai'; - if (path.includes('javascript')) return 'javascript'; - if (path.includes('python')) return 'python'; - if (path.includes('react')) return 'react'; - if (path.includes('nextjs')) return 'nextjs'; - if (path.includes('git')) return 'git'; - if (path.includes('datastructures')) return 'datastructures'; - if (path.includes('rxjs')) return 'rxjs'; - if 
(path.includes('bigo')) return 'bigo'; - if (path.includes('systemdesign')) return 'systemdesign'; - if (path.includes('typescript')) return 'typescript'; - return 'javascript'; // default + const getSidebarItems = (path: string): SidebarItem[] => { + const baseItems = sidebarSections[path] || []; + return getModuleIdFromPathname(path) + ? [...baseItems, { label: 'Quiz', path: `${path}?section=Quiz` }] + : baseItems; }; - const section = getSectionFromPath(location.pathname); + const section = getModuleIdFromPathname(location.pathname) ?? 'javascript'; const theme = getSectionTheme(section); // Initialize expanded items based on current section const getInitialExpandedItems = () => { - const query = new URLSearchParams(location.search); - const currentSection = query.get('section') || 'Introduction'; const basePath = location.pathname; - const sections = sidebarSections[basePath] || []; + const sections = getSidebarItems(basePath); + const query = new URLSearchParams(location.search); + const currentSection = + query.get('section') || getSectionQueryValue(sections[0]?.path ?? '') || 'Introduction'; const expanded: string[] = []; sections.forEach((item) => { if (item.subItems) { const shouldExpand = - currentSection === item.label || - item.subItems.some((sub) => sub.label === currentSection); + getSectionQueryValue(item.path) === currentSection || + item.subItems.some((sub) => getSectionQueryValue(sub.path) === currentSection); if (shouldExpand) { expanded.push(item.label); } @@ -453,7 +430,11 @@ const Sidebar: React.FC = () => { // Get the base path (e.g., '/javascript') const basePath = location.pathname; - const sections = sidebarSections[basePath] || []; + const sections = getSidebarItems(basePath); + const currentSection = + new URLSearchParams(location.search).get('section') || + getSectionQueryValue(sections[0]?.path ?? 
'') || + 'Introduction'; const toggleExpanded = (label: string) => { setExpandedItems((prev) => @@ -537,12 +518,9 @@ const Sidebar: React.FC = () => { const renderMenuItem = (item: SidebarItem, isSubItem = false) => { const hasSubItems = item.subItems && item.subItems.length > 0; const expanded = isExpanded(item.label); - - // Check if this item is active based on current section query param - const query = new URLSearchParams(location.search); - const currentSection = query.get('section') || 'Introduction'; // Default to Introduction const isActive = - currentSection === item.label || item.subItems?.some((sub) => sub.label === currentSection); + getSectionQueryValue(item.path) === currentSection || + item.subItems?.some((sub) => getSectionQueryValue(sub.path) === currentSection); return (
  • diff --git a/src/features/ai/AIFundamentalsPage.tsx b/src/features/ai/AIFundamentalsPage.tsx index c5199ca..e5ce56f 100644 --- a/src/features/ai/AIFundamentalsPage.tsx +++ b/src/features/ai/AIFundamentalsPage.tsx @@ -1,5 +1,6 @@ import React, { Suspense } from 'react'; import { useLocation } from 'react-router-dom'; +import ModuleQuizSection from '../../shared/components/quiz/ModuleQuizSection'; // Lazy load section components for better performance import Introduction from './components/sections/Introduction'; @@ -13,6 +14,7 @@ const Generalization = React.lazy(() => import('./components/sections/Generaliza const TrainingVsInference = React.lazy(() => import('./components/sections/TrainingVsInference')); const WordEmbeddings = React.lazy(() => import('./components/sections/WordEmbeddings')); const RAGPipeline = React.lazy(() => import('./components/sections/RAGPipeline')); +const Quiz = () => ; const sectionComponents: Record = { Introduction, @@ -26,6 +28,7 @@ const sectionComponents: Record = { 'Training vs Inference': TrainingVsInference, 'Word Embeddings': WordEmbeddings, 'RAG Pipeline': RAGPipeline, + Quiz, }; function useQuery(): URLSearchParams { diff --git a/src/features/auth/AuthPage.tsx b/src/features/auth/AuthPage.tsx index 0174b09..6b6223b 100644 --- a/src/features/auth/AuthPage.tsx +++ b/src/features/auth/AuthPage.tsx @@ -3,6 +3,7 @@ import { useLocation } from 'react-router-dom'; import { ErrorBoundary } from '../../shared/components/feedback/ErrorBoundary'; import { LoadingFallback } from '../../shared/components/feedback/LoadingFallback'; import { SEO } from '../../shared/components/SEO/SEO'; +import ModuleQuizSection from '../../shared/components/quiz/ModuleQuizSection'; // Lazy load all section components for better code splitting const Introduction = lazy(() => import('./components/sections/Introduction')); @@ -14,6 +15,7 @@ const PKCEFlow = lazy(() => import('./components/sections/PKCEFlow')); const BFFPattern = lazy(() => 
import('./components/sections/BFFPattern')); const AIAgentAuth = lazy(() => import('./components/sections/AIAgentAuth')); const Visualization = lazy(() => import('./components/sections/Visualization')); +const Quiz = () => ; const sectionComponents: Record = { Introduction, @@ -25,6 +27,7 @@ const sectionComponents: Record = { 'BFF Pattern': BFFPattern, 'AI Agent Authentication': AIAgentAuth, Visualization, + Quiz, }; function useQuery(): URLSearchParams { diff --git a/src/features/bigo/BigOPage.tsx b/src/features/bigo/BigOPage.tsx index 4b850be..f03540c 100644 --- a/src/features/bigo/BigOPage.tsx +++ b/src/features/bigo/BigOPage.tsx @@ -1,5 +1,6 @@ import React from 'react'; import { useLocation } from 'react-router-dom'; +import ModuleQuizSection from '../../shared/components/quiz/ModuleQuizSection'; // Import Big-O sections as they are created import Introduction from './components/sections/Introduction'; import AdvancedConcepts from './components/sections/AdvancedConcepts'; @@ -11,6 +12,7 @@ import AlgorithmAnalysis from './components/sections/AlgorithmAnalysis'; import RealWorldApplications from './components/sections/RealWorldApplications'; import AdvancedTopics from './components/sections/AdvancedTopics'; import Playground from '../../components/playground/Playground'; +const Quiz = () => ; // import AlgorithmAnalysis from './components/sections/AlgorithmAnalysis'; // import RealWorldApplications from './components/sections/RealWorldApplications'; // import PracticeChallenges from './components/sections/PracticeChallenges'; @@ -51,6 +53,8 @@ const BigOPage: React.FC = () => { return ; case 'playground': return ; + case 'quiz': + return ; default: return ; } diff --git a/src/features/datastructures/DataStructuresPage.tsx b/src/features/datastructures/DataStructuresPage.tsx index e8cd77a..26d2aa0 100644 --- a/src/features/datastructures/DataStructuresPage.tsx +++ b/src/features/datastructures/DataStructuresPage.tsx @@ -3,6 +3,7 @@ import { useLocation } from 
'react-router-dom'; import { ErrorBoundary } from '../../shared/components/feedback/ErrorBoundary'; import { LoadingFallback } from '../../shared/components/feedback/LoadingFallback'; import { SEO } from '../../shared/components/SEO/SEO'; +import ModuleQuizSection from '../../shared/components/quiz/ModuleQuizSection'; // Lazy load all section components for better code splitting const Introduction = lazy(() => import('./components/sections/Introduction')); @@ -33,6 +34,7 @@ const RealWorldApplications = lazy(() => import('./components/sections/RealWorld const PracticeProblems = lazy(() => import('./components/sections/PracticeProblems')); const Visualizations3D = lazy(() => import('./components/sections/Visualizations3D')); const Playground = lazy(() => import('../../components/playground/Playground')); +const Quiz = () => ; function useQuery() { return new URLSearchParams(useLocation().search); @@ -108,6 +110,8 @@ const DataStructuresPage: React.FC = () => { return ; case 'playground': return ; + case 'quiz': + return ; default: return ; } diff --git a/src/features/devops/DevOpsPage.tsx b/src/features/devops/DevOpsPage.tsx index b9af9a7..53a06f8 100644 --- a/src/features/devops/DevOpsPage.tsx +++ b/src/features/devops/DevOpsPage.tsx @@ -1,5 +1,6 @@ import React, { Suspense } from 'react'; import { useLocation } from 'react-router-dom'; +import ModuleQuizSection from '../../shared/components/quiz/ModuleQuizSection'; // Lazy load section components for better performance import Introduction from './components/sections/Introduction'; @@ -12,6 +13,7 @@ const ContainerOrchestration = React.lazy( const InfrastructureAsCode = React.lazy(() => import('./components/sections/InfrastructureAsCode')); const ModernDevRoles = React.lazy(() => import('./components/sections/ModernDevRoles')); const Observability = React.lazy(() => import('./components/sections/Observability')); +const Quiz = () => ; const sectionComponents: Record = { Introduction, @@ -22,6 +24,7 @@ const 
sectionComponents: Record = { 'Infrastructure as Code': InfrastructureAsCode, 'Modern Dev Roles': ModernDevRoles, Observability, + Quiz, }; function useQuery(): URLSearchParams { diff --git a/src/features/git/GitPage.tsx b/src/features/git/GitPage.tsx index 13e9ffe..06dcb31 100644 --- a/src/features/git/GitPage.tsx +++ b/src/features/git/GitPage.tsx @@ -1,6 +1,7 @@ import React from 'react'; import { useLocation } from 'react-router-dom'; import { SEO } from '../../shared/components/SEO/SEO'; +import ModuleQuizSection from '../../shared/components/quiz/ModuleQuizSection'; import Introduction from './components/sections/Introduction'; import Architecture from './components/sections/Architecture'; import ThreeTreeModel from './components/sections/ThreeTreeModel'; @@ -12,6 +13,8 @@ import HistoryManagement from './components/sections/HistoryManagement'; import Troubleshooting from './components/sections/Troubleshooting'; import Visualization from './components/sections/Visualization'; +const Quiz = () => ; + const sectionComponents: Record = { Introduction, 'Git Architecture': Architecture, @@ -23,6 +26,7 @@ const sectionComponents: Record = { 'History Management': HistoryManagement, Troubleshooting, Visualization, + Quiz, }; function useQuery() { diff --git a/src/features/javascript/JavaScriptPage.tsx b/src/features/javascript/JavaScriptPage.tsx index a74e394..96fbff0 100644 --- a/src/features/javascript/JavaScriptPage.tsx +++ b/src/features/javascript/JavaScriptPage.tsx @@ -3,6 +3,7 @@ import { useLocation } from 'react-router-dom'; import { ErrorBoundary } from '../../shared/components/feedback/ErrorBoundary'; import { LoadingFallback } from '../../shared/components/feedback/LoadingFallback'; import { SEO } from '../../shared/components/SEO/SEO'; +import ModuleQuizSection from '../../shared/components/quiz/ModuleQuizSection'; // Lazy load all section components for better code splitting const Introduction = lazy(() => import('./components/sections/Introduction')); 
@@ -22,6 +23,7 @@ const JavaScriptRuntime = lazy(() => import('./components/sections/JavaScriptRun const WebAPIs = lazy(() => import('./components/sections/WebAPIs')); const TaskQueues = lazy(() => import('./components/sections/TaskQueues')); const V8Runtime = lazy(() => import('./components/sections/V8Runtime')); +const Quiz = () => ; const sectionComponents: Record = { Introduction, @@ -44,6 +46,7 @@ const sectionComponents: Record = { 'Memory Management': MemoryManagement, 'Memory Leaks': MemoryLeaks, Visualization, + Quiz, }; function useQuery() { diff --git a/src/features/nextjs/NextjsPage.tsx b/src/features/nextjs/NextjsPage.tsx index 4327ba7..d9714f9 100644 --- a/src/features/nextjs/NextjsPage.tsx +++ b/src/features/nextjs/NextjsPage.tsx @@ -2,6 +2,7 @@ import React, { lazy, Suspense } from 'react'; import { useLocation } from 'react-router-dom'; import { ErrorBoundary } from '../../shared/components/feedback/ErrorBoundary'; import { LoadingFallback } from '../../shared/components/feedback/LoadingFallback'; +import ModuleQuizSection from '../../shared/components/quiz/ModuleQuizSection'; // Lazy load all section components for better code splitting const Introduction = lazy(() => import('./components/sections/Introduction')); @@ -11,6 +12,7 @@ const ServerClientComponents = lazy(() => import('./components/sections/ServerCl const DataFetching = lazy(() => import('./components/sections/DataFetching')); const Middleware = lazy(() => import('./components/sections/Middleware')); const Optimization = lazy(() => import('./components/sections/Optimization')); +const Quiz = () => ; const sectionComponents: Record = { Introduction, @@ -20,6 +22,7 @@ const sectionComponents: Record = { 'Data Fetching & Mutations': DataFetching, 'Middleware & Route Handlers': Middleware, 'Optimization & Performance': Optimization, + Quiz, }; function useQuery() { diff --git a/src/features/nodejs/NodeJSPage.tsx b/src/features/nodejs/NodeJSPage.tsx index f6d38e2..f5c0221 100644 --- 
a/src/features/nodejs/NodeJSPage.tsx +++ b/src/features/nodejs/NodeJSPage.tsx @@ -1,5 +1,6 @@ import React, { Suspense } from 'react'; import { useLocation } from 'react-router-dom'; +import ModuleQuizSection from '../../shared/components/quiz/ModuleQuizSection'; // Lazy load section components for better performance import Introduction from './components/sections/Introduction'; @@ -12,6 +13,7 @@ const ModuleSystem = React.lazy(() => import('./components/sections/ModuleSystem const PackageManagers = React.lazy(() => import('./components/sections/PackageManagers')); const Frameworks = React.lazy(() => import('./components/sections/Frameworks')); const RuntimeWars = React.lazy(() => import('./components/sections/RuntimeWars')); +const Quiz = () => ; const sectionComponents: Record = { Introduction, @@ -24,6 +26,7 @@ const sectionComponents: Record = { 'Package Managers': PackageManagers, Frameworks, 'Runtime Wars': RuntimeWars, + Quiz, }; function useQuery(): URLSearchParams { diff --git a/src/features/python/PythonPage.tsx b/src/features/python/PythonPage.tsx index 8e84874..54daad2 100644 --- a/src/features/python/PythonPage.tsx +++ b/src/features/python/PythonPage.tsx @@ -1,5 +1,6 @@ import React from 'react'; import { useLocation } from 'react-router-dom'; +import ModuleQuizSection from '../../shared/components/quiz/ModuleQuizSection'; import Introduction from './components/sections/Introduction'; import PythonPhilosophy from './components/sections/PythonPhilosophy'; import ExecutionModel from './components/sections/ExecutionModel'; @@ -9,6 +10,8 @@ import AdvancedConcepts from './components/sections/AdvancedConcepts'; // Import additional sections as they are created // import Mastery from './components/sections/Mastery'; +const Quiz = () => ; + const sectionComponents: Record = { Introduction, 'Python Philosophy': PythonPhilosophy, @@ -16,6 +19,7 @@ const sectionComponents: Record = { 'Memory Management': MemoryManagement, 'Global Interpreter Lock': 
GlobalInterpreterLock, 'Advanced Concepts': AdvancedConcepts, + Quiz, // Add more sections as they are implemented // Mastery: Mastery, }; diff --git a/src/features/react/ReactPage.tsx b/src/features/react/ReactPage.tsx index b34d8ae..4cf9b94 100644 --- a/src/features/react/ReactPage.tsx +++ b/src/features/react/ReactPage.tsx @@ -1,5 +1,6 @@ import React from 'react'; import { useLocation } from 'react-router-dom'; +import ModuleQuizSection from '../../shared/components/quiz/ModuleQuizSection'; import Introduction from './components/sections/Introduction'; import DOMFundamentals from './components/sections/DOMFundamentals'; import VirtualDOM from './components/sections/VirtualDOM'; @@ -9,6 +10,8 @@ import StateProps from './components/sections/StateProps'; import Hooks from './components/sections/Hooks'; import JSX from './components/sections/JSX'; +const Quiz = () => ; + const sectionComponents: Record = { Introduction, 'DOM Fundamentals': DOMFundamentals, @@ -18,6 +21,7 @@ const sectionComponents: Record = { 'State & Props': StateProps, Hooks, JSX, + Quiz, }; function useQuery() { diff --git a/src/features/rxjs/RxJSPage.tsx b/src/features/rxjs/RxJSPage.tsx index 47bf367..cf25c96 100644 --- a/src/features/rxjs/RxJSPage.tsx +++ b/src/features/rxjs/RxJSPage.tsx @@ -2,6 +2,7 @@ import React, { lazy, Suspense } from 'react'; import { useLocation } from 'react-router-dom'; import { ErrorBoundary } from '../../shared/components/feedback/ErrorBoundary'; import { LoadingFallback } from '../../shared/components/feedback/LoadingFallback'; +import ModuleQuizSection from '../../shared/components/quiz/ModuleQuizSection'; // Lazy load all section components for better code splitting const Introduction = lazy(() => import('./components/sections/Introduction')); @@ -15,6 +16,7 @@ const MarbleDiagrams = lazy(() => import('./components/sections/MarbleDiagrams') const ErrorHandling = lazy(() => import('./components/sections/ErrorHandling')); const RealWorldExamples = lazy(() => 
import('./components/sections/RealWorldExamples')); const VisualizationTool = lazy(() => import('./components/sections/VisualizationTool')); +const Quiz = () => ; // More imports will be added as we create the sections const sectionComponents: Record = { @@ -29,6 +31,7 @@ const sectionComponents: Record = { 'Error Handling': ErrorHandling, 'Real-World Examples': RealWorldExamples, 'Visualization Tool': VisualizationTool, + Quiz, // More sections will be added here as we implement them }; diff --git a/src/features/systemdesign/SystemDesignPage.tsx b/src/features/systemdesign/SystemDesignPage.tsx index 66431f1..41b8e00 100644 --- a/src/features/systemdesign/SystemDesignPage.tsx +++ b/src/features/systemdesign/SystemDesignPage.tsx @@ -1,16 +1,16 @@ import React, { Suspense } from 'react'; import { useLocation } from 'react-router-dom'; +import ModuleQuizSection from '../../shared/components/quiz/ModuleQuizSection'; // Lazy load section components for better performance import Introduction from './components/sections/Introduction'; -const ArchitecturePatterns = React.lazy( - () => import('./components/sections/ArchitecturePatterns') -); +const ArchitecturePatterns = React.lazy(() => import('./components/sections/ArchitecturePatterns')); const DistributedSystems = React.lazy(() => import('./components/sections/DistributedSystems')); const ScalingStrategies = React.lazy(() => import('./components/sections/ScalingStrategies')); const DesignPrinciples = React.lazy(() => import('./components/sections/DesignPrinciples')); const CaseStudies = React.lazy(() => import('./components/sections/CaseStudies')); const Visualization = React.lazy(() => import('./components/sections/Visualization')); +const Quiz = () => ; const sectionComponents: Record = { Introduction, @@ -20,6 +20,7 @@ const sectionComponents: Record = { 'Design Principles': DesignPrinciples, 'Case Studies': CaseStudies, Visualization, + Quiz, }; function useQuery() { diff --git 
a/src/features/typescript/TypeScriptPage.tsx b/src/features/typescript/TypeScriptPage.tsx index 7111471..7fbbd22 100644 --- a/src/features/typescript/TypeScriptPage.tsx +++ b/src/features/typescript/TypeScriptPage.tsx @@ -1,5 +1,6 @@ import React from 'react'; import { useLocation } from 'react-router-dom'; +import ModuleQuizSection from '../../shared/components/quiz/ModuleQuizSection'; import Introduction from './components/sections/Introduction'; import TypeScriptBasics from './components/sections/TypeScriptBasics'; import TypeScriptVsJavaScript from './components/sections/TypeScriptVsJavaScript'; @@ -21,6 +22,8 @@ import AdvancedTypes from './components/sections/AdvancedTypes'; import Generics from './components/sections/Generics'; import TypeScriptVisualization from './components/sections/TypeScriptVisualization'; +const Quiz = () => ; + const sectionComponents: Record = { Introduction, 'TypeScript Basics': TypeScriptBasics, @@ -42,6 +45,7 @@ const sectionComponents: Record = { 'Advanced Types': AdvancedTypes, Generics: Generics, 'TypeScript Visualization': TypeScriptVisualization, + Quiz, }; function useQuery() { diff --git a/src/shared/components/quiz/ModuleQuizSection.tsx b/src/shared/components/quiz/ModuleQuizSection.tsx new file mode 100644 index 0000000..8a0d631 --- /dev/null +++ b/src/shared/components/quiz/ModuleQuizSection.tsx @@ -0,0 +1,545 @@ +import React, { useEffect, useMemo, useRef } from 'react'; +import { Link } from 'react-router-dom'; +import { + AlarmClock, + ArrowLeft, + ArrowRight, + Brain, + Clock3, + Flag, + Play, + RefreshCcw, + Target, + Trophy, + XCircle, +} from 'lucide-react'; +import SectionLayout from '../../../components/shared/SectionLayout'; +import ThemeCard from '../../../components/shared/ThemeCard'; +import StatsGrid from '../../../components/shared/StatsGrid'; +import { LoadingFallback } from '../feedback/LoadingFallback'; +import { useToast } from '../feedback/Toast'; +import { useReducedMotion } from 
'../../hooks/useReducedMotion'; +import { useQuizSession } from '../../hooks/useQuizSession'; +import { learningModuleConfigs } from '../../constants/moduleNavigation'; +import type { LearningModuleId } from '../../constants/moduleNavigation'; +import type { QuizQuestion } from '../../../types/quiz'; +import { + formatQuizAnswer, + formatRemainingTime, + isQuizAnswerComplete, + resolveQuizReferences, +} from '../../../utils/quiz'; +import QuestionRenderer from './QuestionRenderer'; +import { getSectionTheme } from '../../../utils/theme'; + +interface ModuleQuizSectionProps { + moduleId: LearningModuleId; +} + +const getProgressTone = (scorePercentage: number): string => { + if (scorePercentage >= 85) { + return 'text-green-700'; + } + if (scorePercentage >= 60) { + return 'text-blue-700'; + } + return 'text-rose-700'; +}; + +const ModuleQuizSection: React.FC = ({ moduleId }) => { + const moduleConfig = learningModuleConfigs[moduleId]; + const theme = getSectionTheme(moduleConfig.theme); + const prefersReducedMotion = useReducedMotion(); + const { showToast } = useToast(); + const latestToastResultId = useRef(null); + const { + bank, + isLoading, + loadError, + activeAttempt, + questionsForAttempt, + currentQuestion, + currentQuestionIndex, + answeredQuestions, + remainingSeconds, + recentResults, + latestResult, + startAttempt, + restartAttempt, + submitAttempt, + goToQuestion, + goToNextQuestion, + goToPreviousQuestion, + updateAnswer, + getAnswer, + } = useQuizSession(moduleId); + + useEffect(() => { + if (!latestResult || latestToastResultId.current === latestResult.attemptId) { + return; + } + + latestToastResultId.current = latestResult.attemptId; + showToast( + latestResult.scorePercentage >= 70 ? 
'success' : 'info', + `${moduleConfig.title} quiz finished with ${latestResult.scorePercentage}% in ${formatRemainingTime( + latestResult.elapsedSeconds + )}.`, + 4000 + ); + }, [latestResult, moduleConfig.title, showToast]); + + const currentQuestionAnswer = currentQuestion ? getAnswer(currentQuestion.id) : null; + const totalQuestions = activeAttempt?.questionIds.length ?? bank?.questionsPerAttempt ?? 5; + const totalBankQuestions = bank?.questions.length ?? 0; + const totalAllowedSeconds = (bank?.timeLimitMinutes ?? 10) * 60; + const timerProgress = totalAllowedSeconds === 0 ? 0 : remainingSeconds / totalAllowedSeconds; + const attemptProgress = totalQuestions === 0 ? 0 : answeredQuestions / totalQuestions; + const lowTime = remainingSeconds <= 60; + const timerAnnouncement = useMemo(() => { + if (!activeAttempt) { + return null; + } + + if (remainingSeconds <= 30) { + return `${remainingSeconds} seconds remaining`; + } + + if (remainingSeconds > 0 && remainingSeconds % 60 === 0) { + return `${remainingSeconds / 60} minutes remaining`; + } + + return null; + }, [activeAttempt, remainingSeconds]); + + const reviewItems = useMemo(() => { + if (!bank || !latestResult) { + return []; + } + + const questionMap = bank.questions.reduce>( + (accumulator, question) => { + accumulator[question.id] = question; + return accumulator; + }, + {} + ); + + return latestResult.questionOutcomes + .filter((outcome) => !outcome.isCorrect) + .map((outcome) => { + const question = questionMap[outcome.questionId]; + if (!question) { + return null; + } + + return { + question, + selectedAnswer: formatQuizAnswer(question, outcome.selectedAnswer), + correctAnswer: formatQuizAnswer(question, question.correctAnswer), + references: resolveQuizReferences(moduleId, question.references), + }; + }) + .filter( + ( + item + ): item is { + question: QuizQuestion; + selectedAnswer: string; + correctAnswer: string; + references: ReturnType; + } => item !== null + ); + }, [bank, latestResult, 
moduleId]); + + const heroStats = [ + { + value: `${totalBankQuestions}`, + label: 'Scenario Bank Size', + icon: , + }, + { + value: `${bank?.questionsPerAttempt ?? 5}`, + label: 'Questions Per Run', + icon: , + }, + { + value: `${bank?.timeLimitMinutes ?? 10} min`, + label: 'Challenge Clock', + icon: , + }, + ]; + + const renderIntro = () => ( +
    + +
    +

    + Scenario Challenge +

    +

    + Five hard questions. Ten minutes. No filler. +

    +

    {bank?.description}

    +
    + +
    +
    +

    Rules

    +
      +
    • + Expect scenario-based questions that focus on tradeoffs, debugging choices, and + architecture judgment. +
    • +
    • Each run samples a fresh set of five questions from the module bank.
    • +
    • Your active attempt resumes after a refresh until the timer expires.
    • +
    • + After grading, every incorrect answer links you back to the relevant module + sections. +
    • +
    +
    +
    +

    Recent Performance

    + {latestResult ? ( +
    +

    + Latest score:{' '} + + {latestResult.scorePercentage}% + +

    +

    + Tier:{' '} + + {latestResult.performanceTier} + +

    +

    + Points earned:{' '} + {latestResult.points} +

    +
    + ) : ( +

    + No attempts yet. Start with a timed run to seed your local progress history. +

    + )} +
    +
    + +
    + + {latestResult && ( + + )} +
    +
    + + {latestResult && reviewItems.length > 0 && ( + +
    + +

    Last Run Review

    +
    +
    + {reviewItems.map((item) => ( +
    +

    + Review Reference +

    +
    {item.question.prompt}
    +

    {item.question.explanation}

    +
    +
    +

    Your answer

    +

    {item.selectedAnswer}

    +
    +
    +

    Best answer

    +

    {item.correctAnswer}

    +
    +
    + {item.references.length > 0 && ( +
    + {item.references.map((reference) => ( + + Review {reference.label} + + ))} +
    + )} +
    + ))} +
    +
    + )} +
    + ); + + const renderActiveQuestion = () => { + if (!currentQuestion) { + return null; + } + + return ( +
    + +
    +
    +

    + Question {currentQuestionIndex + 1} of {totalQuestions} +

    +

    Timed Scenario

    +
    +
    +

    Time Left

    +

    {formatRemainingTime(remainingSeconds)}

    +
    +
    + +
    + {timerAnnouncement} +
    + +
    +
    +
    + + updateAnswer(currentQuestion.id, answer)} + /> + + +
    + + + {currentQuestionIndex === totalQuestions - 1 ? ( + + ) : ( + + )} +
    +
    + ); + }; + + const renderSidebarContent = () => { + if (isLoading) { + return null; + } + + return ( +
    + +
    + +

    Run Tracker

    +
    +
    +
    + Answered + + {answeredQuestions}/{totalQuestions} + +
    +
    +
    +
    +
    + Clock + + {formatRemainingTime(remainingSeconds)} + +
    +
    + + + {activeAttempt && ( + +
    + +

    Question Map

    +
    +
    + {questionsForAttempt.map((question, index) => { + const isCurrent = index === currentQuestionIndex; + const isAnswered = isQuizAnswerComplete(question, getAnswer(question.id)); + return ( + + ); + })} +
    +
    + )} + + +
    + +

    Local History

    +
    + {recentResults.length > 0 ? ( +
    + {recentResults.slice(0, 4).map((result) => ( +
    +
    + {result.performanceTier} + {result.scorePercentage}% +
    +
    + + {result.correctAnswers}/{result.totalQuestions} correct + + {result.points} pts +
    +
    + ))} +
    + ) : ( +

    + Results are saved in localStorage once you finish a timed run. +

    + )} +
    +
    + ); + }; + + const hero = ( +
    +
    +

    + End-of-Module Quiz +

    +

    {moduleConfig.title} scenario gauntlet

    +

    + Use the timed quiz to check whether the concepts in this module hold up under + production-style pressure. Every run picks five harder questions from a larger bank and + records your local progress for the next session. +

    +
    + +
    + ); + + if (isLoading) { + return ( + } + /> + ); + } + + if (loadError || !bank) { + return ( + +
    + +

    Quiz bank unavailable

    +
    +

    + {loadError ?? 'This module is still missing its quiz bank.'} +

    +

    + Expected file:{' '} + quiz-banks/{moduleConfig.bankFile} +

    +
    + } + /> + ); + } + + return ( + + ); +}; + +export default ModuleQuizSection; diff --git a/src/shared/components/quiz/QuestionRenderer.tsx b/src/shared/components/quiz/QuestionRenderer.tsx new file mode 100644 index 0000000..b8abd4b --- /dev/null +++ b/src/shared/components/quiz/QuestionRenderer.tsx @@ -0,0 +1,279 @@ +import React from 'react'; +import type { + QuizChoiceQuestion, + QuizMatchingQuestion, + QuizMultiSelectQuestion, + QuizOrderingQuestion, + QuizQuestion, + QuizStoredAnswer, +} from '../../../types/quiz'; + +interface QuestionRendererProps { + question: QuizQuestion; + answer: QuizStoredAnswer | null; + colorScheme: string; + onAnswerChange: (answer: QuizStoredAnswer) => void; +} + +const colorStyles: Record = { + indigo: { + selected: 'border-indigo-500 bg-indigo-50 ring-2 ring-indigo-100', + badge: 'bg-indigo-100 text-indigo-800', + muted: 'text-indigo-700', + }, + purple: { + selected: 'border-purple-500 bg-purple-50 ring-2 ring-purple-100', + badge: 'bg-purple-100 text-purple-800', + muted: 'text-purple-700', + }, + orange: { + selected: 'border-orange-500 bg-orange-50 ring-2 ring-orange-100', + badge: 'bg-orange-100 text-orange-800', + muted: 'text-orange-700', + }, + blue: { + selected: 'border-blue-500 bg-blue-50 ring-2 ring-blue-100', + badge: 'bg-blue-100 text-blue-800', + muted: 'text-blue-700', + }, + emerald: { + selected: 'border-emerald-500 bg-emerald-50 ring-2 ring-emerald-100', + badge: 'bg-emerald-100 text-emerald-800', + muted: 'text-emerald-700', + }, + rose: { + selected: 'border-rose-500 bg-rose-50 ring-2 ring-rose-100', + badge: 'bg-rose-100 text-rose-800', + muted: 'text-rose-700', + }, + green: { + selected: 'border-green-500 bg-green-50 ring-2 ring-green-100', + badge: 'bg-green-100 text-green-800', + muted: 'text-green-700', + }, + sky: { + selected: 'border-sky-500 bg-sky-50 ring-2 ring-sky-100', + badge: 'bg-sky-100 text-sky-800', + muted: 'text-sky-700', + }, + amber: { + selected: 'border-amber-500 bg-amber-50 
ring-2 ring-amber-100', + badge: 'bg-amber-100 text-amber-800', + muted: 'text-amber-700', + }, +}; + +const getStyles = (colorScheme: string) => colorStyles[colorScheme] ?? colorStyles.blue; + +const optionBaseClass = + 'w-full rounded-xl border border-gray-200 bg-white p-4 text-left transition-all duration-200 hover:border-gray-300 hover:bg-gray-50'; + +const isNumberArray = (value: QuizStoredAnswer | null): value is number[] => + Array.isArray(value) && value.every((item) => typeof item === 'number'); + +const isStringArray = (value: QuizStoredAnswer | null): value is string[] => + Array.isArray(value) && value.every((item) => typeof item === 'string'); + +export const QuestionRenderer: React.FC = ({ + question, + answer, + colorScheme, + onAnswerChange, +}) => { + const styles = getStyles(colorScheme); + + const renderChoiceQuestion = ( + choiceQuestion: QuizChoiceQuestion | QuizMultiSelectQuestion, + multiSelect = false + ) => { + const selectedAnswers = isNumberArray(answer) ? answer : []; + const selectedAnswer = typeof answer === 'number' ? answer : -1; + + return ( +
    + {choiceQuestion.options.map((option: string, index: number) => { + const isSelected = multiSelect + ? selectedAnswers.includes(index) + : selectedAnswer === index; + + return ( + + ); + })} +
    + ); + }; + + const renderOrderingQuestion = (orderingQuestion: QuizOrderingQuestion) => { + const currentOrder = + isStringArray(answer) && answer.length === orderingQuestion.items.length + ? answer + : orderingQuestion.items; + + const moveItem = (fromIndex: number, direction: -1 | 1) => { + const targetIndex = fromIndex + direction; + if (targetIndex < 0 || targetIndex >= currentOrder.length) { + return; + } + + const nextOrder = [...currentOrder]; + [nextOrder[fromIndex], nextOrder[targetIndex]] = [ + nextOrder[targetIndex], + nextOrder[fromIndex], + ]; + onAnswerChange(nextOrder); + }; + + return ( +
      + {currentOrder.map((item: string, index: number) => ( +
    1. + + {index + 1} + + {item} +
      + + +
      +
    2. + ))} +
    + ); + }; + + const renderMatchingQuestion = (matchingQuestion: QuizMatchingQuestion) => { + const selectedMatches = + isNumberArray(answer) && answer.length === matchingQuestion.premises.length + ? answer + : Array(matchingQuestion.premises.length).fill(-1); + + return ( +
    + {matchingQuestion.premises.map((premise: string, premiseIndex: number) => ( +
    + +
    + ))} +
    + ); + }; + + return ( +
    +
    + + {question.difficulty} + + + {question.type.replace('-', ' ')} + +
    + +
    +

    + {question.scenario} +

    +

    {question.prompt}

    +
    + + {(question.type === 'single-choice' || question.type === 'true-false') && + renderChoiceQuestion(question, false)} + {question.type === 'multi-select' && renderChoiceQuestion(question, true)} + {question.type === 'ordering' && renderOrderingQuestion(question)} + {question.type === 'matching' && renderMatchingQuestion(question)} + + {question.type === 'multi-select' && ( +

    + Select every option that meaningfully contributes to the outcome. +

    + )} + {question.type === 'ordering' && ( +

    + Use the controls to move each step into the order you would execute it in practice. +

    + )} + {question.type === 'matching' && ( +

    + Match each scenario clue to the response that best addresses it. +

    + )} +
    + ); +}; + +export default QuestionRenderer; diff --git a/src/shared/constants/moduleNavigation.ts b/src/shared/constants/moduleNavigation.ts new file mode 100644 index 0000000..6353284 --- /dev/null +++ b/src/shared/constants/moduleNavigation.ts @@ -0,0 +1,472 @@ +export interface SidebarItem { + label: string; + path: string; + subItems?: SidebarItem[]; +} + +export type LearningModuleId = + | 'javascript' + | 'rxjs' + | 'git' + | 'datastructures' + | 'react' + | 'nextjs' + | 'bigo' + | 'python' + | 'systemdesign' + | 'typescript' + | 'ai' + | 'nodejs' + | 'devops' + | 'auth'; + +export interface LearningModuleConfig { + id: LearningModuleId; + title: string; + path: `/${LearningModuleId}`; + bankFile: `${LearningModuleId}.quiz.json`; + theme: LearningModuleId; +} + +export const learningModuleConfigs: Record = { + javascript: { + id: 'javascript', + title: 'JavaScript', + path: '/javascript', + bankFile: 'javascript.quiz.json', + theme: 'javascript', + }, + rxjs: { + id: 'rxjs', + title: 'RxJS', + path: '/rxjs', + bankFile: 'rxjs.quiz.json', + theme: 'rxjs', + }, + git: { + id: 'git', + title: 'Git', + path: '/git', + bankFile: 'git.quiz.json', + theme: 'git', + }, + datastructures: { + id: 'datastructures', + title: 'Data Structures', + path: '/datastructures', + bankFile: 'datastructures.quiz.json', + theme: 'datastructures', + }, + react: { + id: 'react', + title: 'React', + path: '/react', + bankFile: 'react.quiz.json', + theme: 'react', + }, + nextjs: { + id: 'nextjs', + title: 'Next.js', + path: '/nextjs', + bankFile: 'nextjs.quiz.json', + theme: 'nextjs', + }, + bigo: { + id: 'bigo', + title: 'Big-O Notation', + path: '/bigo', + bankFile: 'bigo.quiz.json', + theme: 'bigo', + }, + python: { + id: 'python', + title: 'Python', + path: '/python', + bankFile: 'python.quiz.json', + theme: 'python', + }, + systemdesign: { + id: 'systemdesign', + title: 'System Design', + path: '/systemdesign', + bankFile: 'systemdesign.quiz.json', + theme: 'systemdesign', + 
}, + typescript: { + id: 'typescript', + title: 'TypeScript', + path: '/typescript', + bankFile: 'typescript.quiz.json', + theme: 'typescript', + }, + ai: { + id: 'ai', + title: 'AI Fundamentals', + path: '/ai', + bankFile: 'ai.quiz.json', + theme: 'ai', + }, + nodejs: { + id: 'nodejs', + title: 'Node.js', + path: '/nodejs', + bankFile: 'nodejs.quiz.json', + theme: 'nodejs', + }, + devops: { + id: 'devops', + title: 'DevOps', + path: '/devops', + bankFile: 'devops.quiz.json', + theme: 'devops', + }, + auth: { + id: 'auth', + title: 'Authentication & Authorization', + path: '/auth', + bankFile: 'auth.quiz.json', + theme: 'auth', + }, +}; + +const baseModuleNavigationSections: Record = { + '/javascript': [ + { label: 'Introduction', path: '/javascript?section=Introduction' }, + { label: 'JavaScript History', path: '/javascript?section=JavaScript%20History' }, + { + label: 'Engine & Runtime Comparison', + path: '/javascript?section=Engine%20%26%20Runtime%20Comparison', + }, + { + label: 'JavaScript Engine', + path: '/javascript?section=JavaScript%20Engine', + subItems: [ + { + label: 'Call Stack & Execution', + path: '/javascript?section=Call%20Stack%20%26%20Execution', + }, + { + label: 'Memory Heap & Objects', + path: '/javascript?section=Memory%20Heap%20%26%20Objects', + }, + { + label: 'Parser & AST Generation', + path: '/javascript?section=Parser%20%26%20AST%20Generation', + }, + { + label: 'JIT Compilation Pipeline', + path: '/javascript?section=JIT%20Compilation%20Pipeline', + }, + { label: 'Garbage Collection', path: '/javascript?section=Garbage%20Collection' }, + ], + }, + { + label: 'JavaScript Runtime', + path: '/javascript?section=JavaScript%20Runtime', + subItems: [ + { + label: 'Event Loop & Coordination', + path: '/javascript?section=Event%20Loop%20%26%20Coordination', + }, + { + label: 'Web APIs & Platform', + path: '/javascript?section=Web%20APIs%20%26%20Platform', + }, + { + label: 'Task Queues & Priority', + path: 
'/javascript?section=Task%20Queues%20%26%20Priority', + }, + { + label: 'V8 Runtime Features', + path: '/javascript?section=V8%20Runtime%20Features', + }, + ], + }, + { + label: 'Memory Management', + path: '/javascript?section=Memory%20Management', + subItems: [{ label: 'Memory Leaks', path: '/javascript?section=Memory%20Leaks' }], + }, + { label: 'Visualization', path: '/javascript?section=Visualization' }, + ], + '/rxjs': [ + { label: 'Introduction', path: '/rxjs?section=Introduction' }, + { label: 'Reactive Manifesto', path: '/rxjs?section=Reactive%20Manifesto' }, + { + label: 'Core Components', + path: '/rxjs?section=Core%20Components', + subItems: [ + { label: 'Observables', path: '/rxjs?section=Observables' }, + { label: 'Operators', path: '/rxjs?section=Operators' }, + { label: 'Subjects', path: '/rxjs?section=Subjects' }, + ], + }, + { + label: 'Advanced Concepts', + path: '/rxjs?section=Advanced%20Concepts', + subItems: [ + { label: 'Advanced Operators', path: '/rxjs?section=Advanced%20Operators' }, + { label: 'Marble Diagrams', path: '/rxjs?section=Marble%20Diagrams' }, + { label: 'Error Handling', path: '/rxjs?section=Error%20Handling' }, + ], + }, + { + label: 'Real-World Applications', + path: '/rxjs?section=Real-World%20Applications', + subItems: [ + { label: 'Real-World Examples', path: '/rxjs?section=Real-World%20Examples' }, + { label: 'Visualization Tool', path: '/rxjs?section=Visualization%20Tool' }, + ], + }, + ], + '/git': [ + { label: 'Introduction', path: '/git?section=Introduction' }, + { label: 'Git Architecture', path: '/git?section=Git%20Architecture' }, + { label: 'Three-Tree Model', path: '/git?section=Three-Tree%20Model' }, + { label: 'Object Model', path: '/git?section=Object%20Model' }, + { label: 'Core Workflow', path: '/git?section=Core%20Workflow' }, + { label: 'Branching & Merging', path: '/git?section=Branching%20%26%20Merging' }, + { label: 'Professional Workflows', path: '/git?section=Professional%20Workflows' }, + { label: 
'History Management', path: '/git?section=History%20Management' }, + { label: 'Troubleshooting', path: '/git?section=Troubleshooting' }, + { label: 'Visualization', path: '/git?section=Visualization' }, + ], + '/datastructures': [ + { label: 'Introduction', path: '/datastructures?section=Introduction' }, + { + label: 'Linear Structures', + path: '/datastructures?section=Linear%20Structures', + subItems: [ + { label: 'Arrays', path: '/datastructures?section=Arrays' }, + { label: 'Linked Lists', path: '/datastructures?section=Linked%20Lists' }, + { label: 'Stacks', path: '/datastructures?section=Stacks' }, + { label: 'Queues', path: '/datastructures?section=Queues' }, + { label: 'Deques', path: '/datastructures?section=Deques' }, + { label: 'Strings', path: '/datastructures?section=Strings' }, + ], + }, + { label: 'Hash Tables', path: '/datastructures?section=Hash%20Tables' }, + { label: 'Sets', path: '/datastructures?section=Sets' }, + { + label: 'Tree Structures', + path: '/datastructures?section=Tree%20Structures', + subItems: [ + { label: 'Binary Trees', path: '/datastructures?section=Binary%20Trees' }, + { label: 'Binary Search Trees', path: '/datastructures?section=Binary%20Search%20Trees' }, + { label: 'AVL Trees', path: '/datastructures?section=AVL%20Trees' }, + { label: 'Red-Black Trees', path: '/datastructures?section=Red-Black%20Trees' }, + { label: 'Heaps', path: '/datastructures?section=Heaps' }, + { label: 'B-Trees', path: '/datastructures?section=B-Trees' }, + ], + }, + { + label: 'Graph Structures', + path: '/datastructures?section=Graph%20Structures', + subItems: [ + { label: 'Graph Overview', path: '/datastructures?section=graphs' }, + { label: 'Graph Representation', path: '/datastructures?section=graph-representation' }, + { label: 'Graph Traversal', path: '/datastructures?section=graph-traversal' }, + { label: 'Shortest Path', path: '/datastructures?section=shortest-path' }, + { label: 'Minimum Spanning Tree', path: 
'/datastructures?section=minimum-spanning-tree' }, + { label: 'Graph Types', path: '/datastructures?section=graph-types' }, + ], + }, + { label: 'Complexity Analysis', path: '/datastructures?section=Complexity%20Analysis' }, + { label: 'Interactive Playground', path: '/datastructures?section=playground' }, + { label: 'Real-World Applications', path: '/datastructures?section=Real-World%20Applications' }, + { label: 'Practice Problems', path: '/datastructures?section=Practice%20Problems' }, + { label: '3D Visualizations', path: '/datastructures?section=3D%20Visualizations' }, + ], + '/react': [ + { label: 'Introduction', path: '/react?section=Introduction' }, + { label: 'JSX', path: '/react?section=JSX' }, + { label: 'Components', path: '/react?section=Components' }, + { label: 'State & Props', path: '/react?section=State%20%26%20Props' }, + { label: 'Hooks', path: '/react?section=Hooks' }, + { label: 'DOM Fundamentals', path: '/react?section=DOM%20Fundamentals' }, + { label: 'Virtual DOM', path: '/react?section=Virtual%20DOM' }, + { label: 'Reconciliation', path: '/react?section=Reconciliation' }, + ], + '/nextjs': [ + { label: 'Introduction', path: '/nextjs?section=Introduction' }, + { label: 'Routing Systems', path: '/nextjs?section=Routing%20Systems' }, + { label: 'Rendering Strategies', path: '/nextjs?section=Rendering%20Strategies' }, + { + label: 'Server & Client Components', + path: '/nextjs?section=Server%20%26%20Client%20Components', + }, + { + label: 'Data Fetching & Mutations', + path: '/nextjs?section=Data%20Fetching%20%26%20Mutations', + }, + { + label: 'Middleware & Route Handlers', + path: '/nextjs?section=Middleware%20%26%20Route%20Handlers', + }, + { + label: 'Optimization & Performance', + path: '/nextjs?section=Optimization%20%26%20Performance', + }, + ], + '/bigo': [ + { label: 'Introduction', path: '/bigo?section=introduction' }, + { label: 'Core Concepts', path: '/bigo?section=core-concepts' }, + { label: 'Common Complexities', path: 
'/bigo?section=common-complexities' }, + { label: 'Algorithm Analysis', path: '/bigo?section=algorithm-analysis' }, + { label: 'Advanced Concepts', path: '/bigo?section=advanced-concepts' }, + { label: 'Practice Challenges', path: '/bigo?section=practice-challenges' }, + { label: 'Interactive Playground', path: '/bigo?section=playground' }, + { label: 'Gamification Hub', path: '/bigo?section=gamification-hub' }, + { label: 'Real-World Applications', path: '/bigo?section=real-world-applications' }, + ], + '/python': [ + { label: 'Introduction', path: '/python?section=Introduction' }, + { label: 'Python Philosophy', path: '/python?section=Python%20Philosophy' }, + { label: 'Execution Model', path: '/python?section=Execution%20Model' }, + { label: 'Memory Management', path: '/python?section=Memory%20Management' }, + { label: 'Global Interpreter Lock', path: '/python?section=Global%20Interpreter%20Lock' }, + { label: 'Advanced Concepts', path: '/python?section=Advanced%20Concepts' }, + ], + '/systemdesign': [ + { label: 'Introduction', path: '/systemdesign?section=Introduction' }, + { label: 'Architecture Patterns', path: '/systemdesign?section=Architecture%20Patterns' }, + { label: 'Distributed Systems', path: '/systemdesign?section=Distributed%20Systems' }, + { label: 'Scaling Strategies', path: '/systemdesign?section=Scaling%20Strategies' }, + { label: 'Design Principles', path: '/systemdesign?section=Design%20Principles' }, + { label: 'Case Studies', path: '/systemdesign?section=Case%20Studies' }, + { label: 'Visualization', path: '/systemdesign?section=Visualization' }, + ], + '/typescript': [ + { label: 'Introduction', path: '/typescript?section=Introduction' }, + { label: 'TypeScript vs JavaScript', path: '/typescript?section=TypeScript%20vs%20JavaScript' }, + { + label: 'OOP Fundamentals', + path: '/typescript?section=OOP%20Fundamentals', + subItems: [ + { label: 'Classes & Objects', path: '/typescript?section=Classes%20%26%20Objects' }, + { label: 
'Inheritance', path: '/typescript?section=Inheritance' }, + { label: 'Polymorphism', path: '/typescript?section=Polymorphism' }, + { label: 'Encapsulation', path: '/typescript?section=Encapsulation' }, + { label: 'Abstraction', path: '/typescript?section=Abstraction' }, + ], + }, + { + label: 'Advanced TypeScript', + path: '/typescript?section=Advanced%20TypeScript', + subItems: [ + { label: 'Generics', path: '/typescript?section=Generics' }, + { label: 'Decorators', path: '/typescript?section=Decorators' }, + { label: 'Advanced Types', path: '/typescript?section=Advanced%20Types' }, + { label: 'Type Guards', path: '/typescript?section=Type%20Guards' }, + ], + }, + { label: 'TypeScript Visualization', path: '/typescript?section=TypeScript%20Visualization' }, + { label: 'Best Practices', path: '/typescript?section=Best%20Practices' }, + { label: 'Migration Guide', path: '/typescript?section=Migration%20Guide' }, + ], + '/ai': [ + { label: 'Introduction', path: '/ai?section=Introduction' }, + { label: 'ML Lifecycle', path: '/ai?section=ML%20Lifecycle' }, + { label: 'Feature Engineering', path: '/ai?section=Feature%20Engineering' }, + { label: 'Neural Networks', path: '/ai?section=Neural%20Networks' }, + { label: 'Loss Functions', path: '/ai?section=Loss%20Functions' }, + { label: 'Gradient Descent', path: '/ai?section=Gradient%20Descent' }, + { label: 'Backpropagation', path: '/ai?section=Backpropagation' }, + { label: 'Generalization', path: '/ai?section=Generalization' }, + { label: 'Training vs Inference', path: '/ai?section=Training%20vs%20Inference' }, + { label: 'Word Embeddings', path: '/ai?section=Word%20Embeddings' }, + { label: 'RAG Pipeline', path: '/ai?section=RAG%20Pipeline' }, + ], + '/nodejs': [ + { label: 'Introduction', path: '/nodejs?section=Introduction' }, + { label: 'Event Loop', path: '/nodejs?section=Event%20Loop' }, + { label: 'Async Programming', path: '/nodejs?section=Async%20Programming' }, + { label: 'Buffers & Streams', path: 
'/nodejs?section=Buffers%20%26%20Streams' }, + { label: 'Scaling', path: '/nodejs?section=Scaling' }, + { label: 'Memory Management', path: '/nodejs?section=Memory%20Management' }, + { label: 'Module System', path: '/nodejs?section=Module%20System' }, + { label: 'Package Managers', path: '/nodejs?section=Package%20Managers' }, + { label: 'Frameworks', path: '/nodejs?section=Frameworks' }, + { label: 'Runtime Wars', path: '/nodejs?section=Runtime%20Wars' }, + ], + '/devops': [ + { label: 'Introduction', path: '/devops?section=Introduction' }, + { label: 'CI/CD Pipeline', path: '/devops?section=CI%2FCD%20Pipeline' }, + { label: 'Cloud Service Models', path: '/devops?section=Cloud%20Service%20Models' }, + { label: 'Cloud Architecture', path: '/devops?section=Cloud%20Architecture' }, + { label: 'Container Orchestration', path: '/devops?section=Container%20Orchestration' }, + { label: 'Infrastructure as Code', path: '/devops?section=Infrastructure%20as%20Code' }, + { label: 'Modern Dev Roles', path: '/devops?section=Modern%20Dev%20Roles' }, + { label: 'Observability', path: '/devops?section=Observability' }, + ], + '/auth': [ + { label: 'Introduction', path: '/auth?section=Introduction' }, + { + label: 'Evolution of Digital Identity', + path: '/auth?section=Evolution%20of%20Digital%20Identity', + }, + { label: 'Authentication Types', path: '/auth?section=Authentication%20Types' }, + { label: 'Authorization Models', path: '/auth?section=Authorization%20Models' }, + { + label: 'OAuth 2.0 & OpenID Connect', + path: '/auth?section=OAuth%202.0%20%26%20OpenID%20Connect', + }, + { label: 'PKCE Authorization Flow', path: '/auth?section=PKCE%20Authorization%20Flow' }, + { label: 'BFF Pattern', path: '/auth?section=BFF%20Pattern' }, + { label: 'AI Agent Authentication', path: '/auth?section=AI%20Agent%20Authentication' }, + { label: 'Visualization', path: '/auth?section=Visualization' }, + ], + '/': [], + '/about': [], +}; + +export const getSectionQueryValue = (path: string): 
string | null => { + const queryStart = path.indexOf('?'); + if (queryStart === -1) { + return null; + } + + const query = path.slice(queryStart + 1); + return new URLSearchParams(query).get('section'); +}; + +const createQuizSidebarItem = (path: string): SidebarItem => ({ + label: 'Quiz', + path: `${path}?section=Quiz`, +}); + +export const moduleNavigationSections: Record = Object.fromEntries( + Object.entries(baseModuleNavigationSections).map(([path, items]) => { + if (path === '/' || path === '/about') { + return [path, items]; + } + + return [path, [...items, createQuizSidebarItem(path)]]; + }) +); + +export const learningModuleIds = Object.keys(learningModuleConfigs) as LearningModuleId[]; + +export const getModuleIdFromPathname = (pathname: string): LearningModuleId | null => { + const match = learningModuleIds.find( + (moduleId) => learningModuleConfigs[moduleId].path === pathname + ); + return match ?? null; +}; + +export const getModuleConfigFromPathname = (pathname: string): LearningModuleConfig | null => { + const moduleId = getModuleIdFromPathname(pathname); + return moduleId ? learningModuleConfigs[moduleId] : null; +}; + +const flattenSidebarItems = (items: SidebarItem[]): SidebarItem[] => + items.flatMap((item) => [item, ...(item.subItems ? flattenSidebarItems(item.subItems) : [])]); + +export const getSectionPathByLabel = ( + moduleId: LearningModuleId, + sectionLabel: string +): string | null => { + const items = moduleNavigationSections[learningModuleConfigs[moduleId].path] ?? []; + const match = flattenSidebarItems(items).find((item) => item.label === sectionLabel); + return match?.path ?? 
null;
};
// --- end of src/shared/components/quiz/QuestionRenderer.tsx ---

// --- src/shared/hooks/useQuizSession.ts (new file) ---
import { useCallback, useEffect, useMemo, useState } from 'react';
import type { LearningModuleId } from '../constants/moduleNavigation';
import type {
  QuizAttemptState,
  QuizBankFile,
  QuizQuestion,
  QuizResultSummary,
  QuizStoredAnswer,
} from '../../types/quiz';
import {
  QUIZ_HISTORY_LIMIT,
  buildQuizResult,
  createQuestionMap,
  createQuizAttempt,
  getQuizStorageKeys,
  isQuizAnswerComplete,
  loadQuizBank,
} from '../../utils/quiz';

/** Public contract returned by {@link useQuizSession}. */
interface UseQuizSessionResult {
  bank: QuizBankFile | null;
  isLoading: boolean;
  loadError: string | null;
  activeAttempt: QuizAttemptState | null;
  questionsForAttempt: QuizQuestion[];
  currentQuestion: QuizQuestion | null;
  currentQuestionIndex: number;
  answeredQuestions: number;
  remainingSeconds: number;
  recentResults: QuizResultSummary[];
  latestResult: QuizResultSummary | null;
  startAttempt: () => void;
  restartAttempt: () => void;
  submitAttempt: () => void;
  goToQuestion: (index: number) => void;
  goToNextQuestion: () => void;
  goToPreviousQuestion: () => void;
  updateAnswer: (questionId: string, answer: QuizStoredAnswer) => void;
  getAnswer: (questionId: string) => QuizStoredAnswer | null;
}

// Parse JSON from localStorage without throwing; returns null on any failure.
// NOTE(review): the `<T>` parameter was stripped by the extraction and is
// reconstructed here from the `T | null` return type.
const safeParse = <T,>(value: string | null): T | null => {
  if (!value) {
    return null;
  }
  try {
    return JSON.parse(value) as T;
  } catch {
    return null;
  }
};

/**
 * Manages one module's quiz lifecycle: loads the quiz bank, hydrates any
 * in-flight attempt from localStorage, runs the countdown timer, persists
 * answers/results, and exposes navigation + answer mutators.
 */
export const useQuizSession = (moduleId: LearningModuleId): UseQuizSessionResult => {
  const [bank, setBank] = useState<QuizBankFile | null>(null);
  const [isLoading, setIsLoading] = useState(true);
  const [loadError, setLoadError] = useState<string | null>(null);
  const [activeAttempt, setActiveAttempt] = useState<QuizAttemptState | null>(null);
  const [recentResults, setRecentResults] = useState<QuizResultSummary[]>([]);
  const [remainingSeconds, setRemainingSeconds] = useState(0);
  // Guards the persistence effects so they never run before hydration,
  // which would clobber stored state with the initial empty values.
  const [hasHydrated, setHasHydrated] = useState(false);

  const storageKeys = useMemo(() => getQuizStorageKeys(moduleId), [moduleId]);

  const questionMap = useMemo<Record<string, QuizQuestion>>(
    () => (bank ? createQuestionMap(bank) : {}),
    [bank]
  );

  // Convert the active attempt into a result summary, prepend it to history
  // (deduped by attempt id, capped at QUIZ_HISTORY_LIMIT), and reset the clock.
  const finalizeAttempt = useCallback(
    (status: 'completed' | 'timed-out') => {
      if (!bank || !activeAttempt) {
        return;
      }

      const result = buildQuizResult(bank, activeAttempt, status);
      setRecentResults((previousResults) =>
        [result, ...previousResults.filter((item) => item.attemptId !== result.attemptId)].slice(
          0,
          QUIZ_HISTORY_LIMIT
        )
      );
      setActiveAttempt(null);
      setRemainingSeconds(bank.timeLimitMinutes * 60);
    },
    [activeAttempt, bank]
  );

  // Load the quiz bank for the module; the cancellation flag prevents
  // setState on an unmounted component or after a module switch.
  useEffect(() => {
    let isCancelled = false;

    const fetchQuizBank = async () => {
      setIsLoading(true);
      setLoadError(null);

      try {
        const loadedBank = await loadQuizBank(moduleId);
        if (isCancelled) {
          return;
        }

        if (!loadedBank) {
          setBank(null);
          setLoadError('Quiz bank is missing or invalid for this module.');
        } else {
          setBank(loadedBank);
          setRemainingSeconds(loadedBank.timeLimitMinutes * 60);
        }
      } catch {
        if (!isCancelled) {
          setBank(null);
          setLoadError('Quiz bank could not be loaded for this module.');
        }
      } finally {
        if (!isCancelled) {
          setIsLoading(false);
        }
      }
    };

    void fetchQuizBank();

    return () => {
      isCancelled = true;
    };
  }, [moduleId]);

  // Hydrate persisted results and any in-flight attempt once the bank is
  // known. A stored attempt is only resumed when it matches the current bank
  // version and every question id still exists; expired attempts are
  // finalized as timed-out.
  useEffect(() => {
    if (!bank) {
      return;
    }

    const storedResults = safeParse<QuizResultSummary[]>(localStorage.getItem(storageKeys.results));
    const initialResults = Array.isArray(storedResults) ? storedResults : [];

    const storedAttempt = safeParse<QuizAttemptState>(
      localStorage.getItem(storageKeys.activeAttempt)
    );
    if (
      storedAttempt &&
      storedAttempt.bankVersion === bank.version &&
      storedAttempt.questionIds.every((questionId) => Boolean(questionMap[questionId]))
    ) {
      const expiresAt = new Date(storedAttempt.expiresAt).getTime();
      if (expiresAt <= Date.now()) {
        // Fix: use the attempt's expiry as the completion timestamp so
        // elapsedSeconds never exceeds the time limit (previously defaulted
        // to "now", inflating elapsed time for long-abandoned attempts).
        const timedOutResult = buildQuizResult(
          bank,
          storedAttempt,
          'timed-out',
          new Date(storedAttempt.expiresAt)
        );
        setRecentResults(
          [
            timedOutResult,
            ...initialResults.filter((item) => item.attemptId !== timedOutResult.attemptId),
          ].slice(0, QUIZ_HISTORY_LIMIT)
        );
        localStorage.removeItem(storageKeys.activeAttempt);
        setActiveAttempt(null);
      } else {
        setRecentResults(initialResults);
        setActiveAttempt(storedAttempt);
      }
    } else {
      setRecentResults(initialResults);
      localStorage.removeItem(storageKeys.activeAttempt);
      setActiveAttempt(null);
    }

    setHasHydrated(true);
  }, [bank, questionMap, storageKeys.activeAttempt, storageKeys.results]);

  // Persist (or clear) the active attempt after hydration.
  useEffect(() => {
    if (!hasHydrated) {
      return;
    }

    if (!activeAttempt) {
      localStorage.removeItem(storageKeys.activeAttempt);
      return;
    }

    localStorage.setItem(storageKeys.activeAttempt, JSON.stringify(activeAttempt));
  }, [activeAttempt, hasHydrated, storageKeys.activeAttempt]);

  // Persist the results history after hydration.
  useEffect(() => {
    if (!hasHydrated) {
      return;
    }

    localStorage.setItem(storageKeys.results, JSON.stringify(recentResults));
  }, [hasHydrated, recentResults, storageKeys.results]);

  // Countdown timer: recompute remaining time every second from the wall
  // clock (robust against tab throttling) and time the attempt out at zero.
  useEffect(() => {
    if (!activeAttempt || !bank) {
      return;
    }

    const updateRemainingTime = () => {
      const nextRemainingSeconds = Math.max(
        0,
        Math.round((new Date(activeAttempt.expiresAt).getTime() - Date.now()) / 1000)
      );

      setRemainingSeconds(nextRemainingSeconds);
      if (nextRemainingSeconds <= 0) {
        finalizeAttempt('timed-out');
      }
    };

    updateRemainingTime();
    const timerId = window.setInterval(updateRemainingTime, 1000);

    return () => {
      window.clearInterval(timerId);
    };
  }, [activeAttempt, bank, finalizeAttempt]);

  const questionsForAttempt = useMemo(
    () =>
      activeAttempt
        ? activeAttempt.questionIds
            .map((questionId) => questionMap[questionId])
            .filter((question): question is QuizQuestion => Boolean(question))
        : [],
    [activeAttempt, questionMap]
  );

  const currentQuestion = useMemo(() => {
    if (!activeAttempt) {
      return null;
    }

    return questionMap[activeAttempt.questionIds[activeAttempt.currentQuestionIndex]] ?? null;
  }, [activeAttempt, questionMap]);

  // Count only answers that are complete for their question type.
  const answeredQuestions = useMemo(() => {
    if (!activeAttempt) {
      return 0;
    }

    return activeAttempt.questionIds.reduce((count, questionId) => {
      const question = questionMap[questionId];
      const answer = activeAttempt.answers[questionId];
      return question && isQuizAnswerComplete(question, answer) ? count + 1 : count;
    }, 0);
  }, [activeAttempt, questionMap]);

  const startAttempt = useCallback(() => {
    if (!bank) {
      return;
    }

    const nextAttempt = createQuizAttempt(moduleId, bank);
    setActiveAttempt(nextAttempt);
    setRemainingSeconds(bank.timeLimitMinutes * 60);
  }, [bank, moduleId]);

  const restartAttempt = useCallback(() => {
    startAttempt();
  }, [startAttempt]);

  const submitAttempt = useCallback(() => {
    finalizeAttempt('completed');
  }, [finalizeAttempt]);

  // Clamp navigation into [0, lastIndex].
  const goToQuestion = useCallback((index: number) => {
    setActiveAttempt((previousAttempt) => {
      if (!previousAttempt) {
        return previousAttempt;
      }

      const nextIndex = Math.max(0, Math.min(index, previousAttempt.questionIds.length - 1));
      return { ...previousAttempt, currentQuestionIndex: nextIndex };
    });
  }, []);

  const goToNextQuestion = useCallback(() => {
    setActiveAttempt((previousAttempt) => {
      if (!previousAttempt) {
        return previousAttempt;
      }

      return {
        ...previousAttempt,
        currentQuestionIndex: Math.min(
          previousAttempt.currentQuestionIndex + 1,
          previousAttempt.questionIds.length - 1
        ),
      };
    });
  }, []);

  const goToPreviousQuestion = useCallback(() => {
    setActiveAttempt((previousAttempt) => {
      if (!previousAttempt) {
        return previousAttempt;
      }

      return {
        ...previousAttempt,
        currentQuestionIndex: Math.max(previousAttempt.currentQuestionIndex - 1, 0),
      };
    });
  }, []);

  const updateAnswer = useCallback((questionId: string, answer: QuizStoredAnswer) => {
    setActiveAttempt((previousAttempt) => {
      if (!previousAttempt) {
        return previousAttempt;
      }

      return {
        ...previousAttempt,
        answers: {
          ...previousAttempt.answers,
          [questionId]: answer,
        },
      };
    });
  }, []);

  const getAnswer = useCallback(
    (questionId: string) => activeAttempt?.answers[questionId] ?? null,
    [activeAttempt]
  );

  return {
    bank,
    isLoading,
    loadError,
    activeAttempt,
    questionsForAttempt,
    currentQuestion,
    currentQuestionIndex: activeAttempt?.currentQuestionIndex ?? 0,
    answeredQuestions,
    remainingSeconds,
    recentResults,
    latestResult: recentResults[0] ?? null,
    startAttempt,
    restartAttempt,
    submitAttempt,
    goToQuestion,
    goToNextQuestion,
    goToPreviousQuestion,
    updateAnswer,
    getAnswer,
  };
};

export default useQuizSession;

// --- src/types/quiz.ts (new file) ---
import type { LearningModuleId } from '../shared/constants/moduleNavigation';

export type QuizDifficulty = 'intermediate' | 'advanced' | 'expert';
export type QuizQuestionType =
  | 'single-choice'
  | 'multi-select'
  | 'true-false'
  | 'ordering'
  | 'matching';

/** Fields shared by every question variant. */
export interface QuizQuestionBase {
  id: string;
  type: QuizQuestionType;
  difficulty: QuizDifficulty;
  scenario: string;
  prompt: string;
  explanation: string;
  tags: string[];
  references: string[];
}

export interface QuizChoiceQuestion extends QuizQuestionBase {
  type: 'single-choice' | 'true-false';
  options: string[];
  // Index into `options`.
  correctAnswer: number;
}

export interface QuizMultiSelectQuestion extends QuizQuestionBase {
  type: 'multi-select';
  options: string[];
  // Indices into `options`; order-insensitive at grading time.
  correctAnswer: number[];
}

export interface QuizOrderingQuestion extends QuizQuestionBase {
  type: 'ordering';
  items: string[];
  // `items` in the correct order.
  correctAnswer: string[];
}

export interface QuizMatchingQuestion extends QuizQuestionBase {
  type: 'matching';
  premises: string[];
  responses: string[];
  // correctAnswer[i] is the index into `responses` matching premises[i].
  correctAnswer: number[];
}

/** Discriminated union over the `type` tag. */
export type QuizQuestion =
  | QuizChoiceQuestion
  | QuizMultiSelectQuestion
  | QuizOrderingQuestion
  | QuizMatchingQuestion;

/** Persisted answer shape: choice index, index list, or ordered item list. */
export type QuizStoredAnswer = number | number[] | string[];

/** Shape of a quiz-banks/{module}.quiz.json file. */
export interface QuizBankFile {
  moduleId: LearningModuleId;
  moduleTitle: string;
  description: string;
  version: number;
  timeLimitMinutes: number;
  questionsPerAttempt: number;
  questions: QuizQuestion[];
}

/** An in-flight attempt, persisted to localStorage between renders. */
export interface QuizAttemptState {
  id: string;
  moduleId: LearningModuleId;
  bankVersion: number;
  questionIds: string[];
  // Keyed by question id. (Type argument reconstructed; the extraction
  // stripped the generic parameters from `Record`.)
  answers: Record<string, QuizStoredAnswer>;
  currentQuestionIndex: number;
  startedAt: string;
  expiresAt: string;
}

export interface QuizQuestionOutcome {
  questionId: string;
  isCorrect: boolean;
  selectedAnswer: QuizStoredAnswer | null;
}

/** Immutable summary of a finished (or timed-out) attempt. */
export interface QuizResultSummary {
  attemptId: string;
  moduleId: LearningModuleId;
  status: 'completed' | 'timed-out';
  startedAt: string;
  completedAt: string;
  elapsedSeconds: number;
  totalQuestions: number;
  answeredQuestions: number;
  correctAnswers: number;
  incorrectAnswers: number;
  scorePercentage: number;
  performanceTier: string;
  points: number;
  questionOutcomes: QuizQuestionOutcome[];
}

/** A question reference resolved to an in-app navigation path. */
export interface QuizReferenceLink {
  label: string;
  path: string;
}
string[]; + answers: Record; + currentQuestionIndex: number; + startedAt: string; + expiresAt: string; +} + +export interface QuizQuestionOutcome { + questionId: string; + isCorrect: boolean; + selectedAnswer: QuizStoredAnswer | null; +} + +export interface QuizResultSummary { + attemptId: string; + moduleId: LearningModuleId; + status: 'completed' | 'timed-out'; + startedAt: string; + completedAt: string; + elapsedSeconds: number; + totalQuestions: number; + answeredQuestions: number; + correctAnswers: number; + incorrectAnswers: number; + scorePercentage: number; + performanceTier: string; + points: number; + questionOutcomes: QuizQuestionOutcome[]; +} + +export interface QuizReferenceLink { + label: string; + path: string; +} diff --git a/src/utils/quiz.ts b/src/utils/quiz.ts new file mode 100644 index 0000000..d03e438 --- /dev/null +++ b/src/utils/quiz.ts @@ -0,0 +1,321 @@ +import type { + QuizAttemptState, + QuizBankFile, + QuizQuestion, + QuizReferenceLink, + QuizResultSummary, + QuizStoredAnswer, +} from '../types/quiz'; +import { getSectionPathByLabel } from '../shared/constants/moduleNavigation'; +import type { LearningModuleId } from '../shared/constants/moduleNavigation'; + +const quizBankLoaders = import.meta.glob('/quiz-banks/*.quiz.json', { + import: 'default', +}); + +export const QUIZ_STORAGE_PREFIX = 'code-executives.quiz'; +export const QUIZ_HISTORY_LIMIT = 8; + +const PERFORMANCE_TIERS = [ + { minScore: 95, label: 'Executive Strategist' }, + { minScore: 85, label: 'Systems Thinker' }, + { minScore: 70, label: 'Scenario Navigator' }, + { minScore: 50, label: 'Concept Builder' }, + { minScore: 0, label: 'Needs Another Pass' }, +] as const; + +const isStringArray = (value: unknown): value is string[] => + Array.isArray(value) && value.every((item) => typeof item === 'string'); + +const isNumberArray = (value: unknown): value is number[] => + Array.isArray(value) && value.every((item) => typeof item === 'number'); + +const hasBaseQuestionShape = 
(value: unknown): value is Omit => { + if (!value || typeof value !== 'object') { + return false; + } + + const candidate = value as Record; + return ( + typeof candidate.id === 'string' && + typeof candidate.type === 'string' && + typeof candidate.difficulty === 'string' && + typeof candidate.scenario === 'string' && + typeof candidate.prompt === 'string' && + typeof candidate.explanation === 'string' && + isStringArray(candidate.tags) && + isStringArray(candidate.references) + ); +}; + +export const isQuizQuestion = (value: unknown): value is QuizQuestion => { + if (!hasBaseQuestionShape(value)) { + return false; + } + + const candidate = value as Record; + switch (candidate.type) { + case 'single-choice': + case 'true-false': + return isStringArray(candidate.options) && typeof candidate.correctAnswer === 'number'; + case 'multi-select': + return isStringArray(candidate.options) && isNumberArray(candidate.correctAnswer); + case 'ordering': + return isStringArray(candidate.items) && isStringArray(candidate.correctAnswer); + case 'matching': + return ( + isStringArray(candidate.premises) && + isStringArray(candidate.responses) && + isNumberArray(candidate.correctAnswer) + ); + default: + return false; + } +}; + +export const isQuizBankFile = ( + value: unknown, + moduleId: LearningModuleId +): value is QuizBankFile => { + if (!value || typeof value !== 'object') { + return false; + } + + const candidate = value as Record; + return ( + candidate.moduleId === moduleId && + typeof candidate.moduleTitle === 'string' && + typeof candidate.description === 'string' && + typeof candidate.version === 'number' && + typeof candidate.timeLimitMinutes === 'number' && + typeof candidate.questionsPerAttempt === 'number' && + Array.isArray(candidate.questions) && + candidate.questions.every((question) => isQuizQuestion(question)) + ); +}; + +export const loadQuizBank = async (moduleId: LearningModuleId): Promise => { + const loader = 
quizBankLoaders[`/quiz-banks/${moduleId}.quiz.json`]; + if (!loader) { + return null; + } + + const bank = await loader(); + return isQuizBankFile(bank, moduleId) ? bank : null; +}; + +export const getQuizStorageKeys = (moduleId: LearningModuleId) => ({ + activeAttempt: `${QUIZ_STORAGE_PREFIX}.${moduleId}.active-attempt`, + results: `${QUIZ_STORAGE_PREFIX}.${moduleId}.results`, +}); + +const sortNumberArray = (items: number[]): number[] => + [...items].sort((left, right) => left - right); + +const arraysEqual = (left: T[], right: T[]): boolean => + left.length === right.length && left.every((item, index) => item === right[index]); + +export const isQuizAnswerComplete = ( + question: QuizQuestion, + answer: QuizStoredAnswer | null | undefined +): boolean => { + if (answer === null || answer === undefined) { + return false; + } + + switch (question.type) { + case 'single-choice': + case 'true-false': + return typeof answer === 'number'; + case 'multi-select': + return isNumberArray(answer) && answer.length > 0; + case 'ordering': + return isStringArray(answer) && answer.length === question.items.length; + case 'matching': + return ( + isNumberArray(answer) && + answer.length === question.premises.length && + answer.every((item) => item >= 0) + ); + default: + return false; + } +}; + +export const isCorrectQuizAnswer = ( + question: QuizQuestion, + answer: QuizStoredAnswer | null | undefined +): boolean => { + if (!isQuizAnswerComplete(question, answer)) { + return false; + } + + switch (question.type) { + case 'single-choice': + case 'true-false': + return typeof answer === 'number' && question.correctAnswer === answer; + case 'multi-select': + return ( + isNumberArray(answer) && + arraysEqual(sortNumberArray(question.correctAnswer), sortNumberArray(answer)) + ); + case 'ordering': + return isStringArray(answer) && arraysEqual(question.correctAnswer, answer); + case 'matching': + return isNumberArray(answer) && arraysEqual(question.correctAnswer, answer); + default: 
+ return false; + } +}; + +export const formatRemainingTime = (remainingSeconds: number): string => { + const safeSeconds = Math.max(0, remainingSeconds); + const minutes = Math.floor(safeSeconds / 60) + .toString() + .padStart(2, '0'); + const seconds = (safeSeconds % 60).toString().padStart(2, '0'); + return `${minutes}:${seconds}`; +}; + +export const getPerformanceTier = (scorePercentage: number): string => + PERFORMANCE_TIERS.find((tier) => scorePercentage >= tier.minScore)?.label ?? 'Needs Another Pass'; + +export const sampleQuizQuestionIds = ( + questions: QuizQuestion[], + questionsPerAttempt: number +): string[] => { + const questionIds = questions.map((question) => question.id); + for (let index = questionIds.length - 1; index > 0; index -= 1) { + const swapIndex = Math.floor(Math.random() * (index + 1)); + [questionIds[index], questionIds[swapIndex]] = [questionIds[swapIndex], questionIds[index]]; + } + + return questionIds.slice(0, Math.min(questionsPerAttempt, questionIds.length)); +}; + +export const createQuizAttempt = ( + moduleId: LearningModuleId, + bank: QuizBankFile +): QuizAttemptState => { + const startedAt = new Date(); + const expiresAt = new Date(startedAt.getTime() + bank.timeLimitMinutes * 60 * 1000); + + return { + id: + typeof crypto !== 'undefined' && 'randomUUID' in crypto + ? 
crypto.randomUUID() + : `${moduleId}-${startedAt.getTime()}`, + moduleId, + bankVersion: bank.version, + questionIds: sampleQuizQuestionIds(bank.questions, bank.questionsPerAttempt), + answers: {}, + currentQuestionIndex: 0, + startedAt: startedAt.toISOString(), + expiresAt: expiresAt.toISOString(), + }; +}; + +export const createQuestionMap = (bank: QuizBankFile): Record => + bank.questions.reduce>((accumulator, question) => { + accumulator[question.id] = question; + return accumulator; + }, {}); + +export const buildQuizResult = ( + bank: QuizBankFile, + attempt: QuizAttemptState, + status: 'completed' | 'timed-out', + completedAt: Date = new Date() +): QuizResultSummary => { + const questionMap = createQuestionMap(bank); + const questionOutcomes = attempt.questionIds.map((questionId) => { + const question = questionMap[questionId]; + const selectedAnswer = attempt.answers[questionId] ?? null; + + return { + questionId, + isCorrect: question ? isCorrectQuizAnswer(question, selectedAnswer) : false, + selectedAnswer, + }; + }); + + const answeredQuestions = questionOutcomes.filter( + (outcome) => outcome.selectedAnswer !== null + ).length; + const correctAnswers = questionOutcomes.filter((outcome) => outcome.isCorrect).length; + const totalQuestions = questionOutcomes.length; + const incorrectAnswers = totalQuestions - correctAnswers; + const scorePercentage = + totalQuestions === 0 ? 
0 : Math.round((correctAnswers / totalQuestions) * 100); + const startedAt = new Date(attempt.startedAt); + const elapsedSeconds = Math.max( + 0, + Math.round((completedAt.getTime() - startedAt.getTime()) / 1000) + ); + const totalAllowedSeconds = bank.timeLimitMinutes * 60; + const timeBonusRatio = Math.max(0, (totalAllowedSeconds - elapsedSeconds) / totalAllowedSeconds); + const points = Math.round(correctAnswers * 120 + timeBonusRatio * 80); + + return { + attemptId: attempt.id, + moduleId: attempt.moduleId, + status, + startedAt: attempt.startedAt, + completedAt: completedAt.toISOString(), + elapsedSeconds, + totalQuestions, + answeredQuestions, + correctAnswers, + incorrectAnswers, + scorePercentage, + performanceTier: getPerformanceTier(scorePercentage), + points, + questionOutcomes, + }; +}; + +export const resolveQuizReferences = ( + moduleId: LearningModuleId, + references: string[] +): QuizReferenceLink[] => + references + .map((label) => { + const path = getSectionPathByLabel(moduleId, label); + return path ? { label, path } : null; + }) + .filter((item): item is QuizReferenceLink => item !== null); + +export const formatQuizAnswer = ( + question: QuizQuestion, + answer: QuizStoredAnswer | null | undefined +): string => { + if (!isQuizAnswerComplete(question, answer)) { + return 'Not answered'; + } + + switch (question.type) { + case 'single-choice': + case 'true-false': + return typeof answer === 'number' + ? (question.options[answer] ?? 'Unknown choice') + : 'Not answered'; + case 'multi-select': + return isNumberArray(answer) + ? answer.map((index: number) => question.options[index] ?? 'Unknown choice').join(', ') + : 'Not answered'; + case 'ordering': + return isStringArray(answer) ? answer.join(' -> ') : 'Not answered'; + case 'matching': + return isNumberArray(answer) + ? answer + .map((responseIndex: number, premiseIndex: number) => { + const premise = question.premises[premiseIndex] ?? 
`Pair ${premiseIndex + 1}`; + const response = question.responses[responseIndex] ?? 'Unmatched'; + return `${premise}: ${response}`; + }) + .join(' | ') + : 'Not answered'; + default: + return 'Not answered'; + } +}; From ca1c242d95d36c435a20cb4dc5222238a3600205 Mon Sep 17 00:00:00 2001 From: Mohammad Naim Faizy Date: Thu, 9 Apr 2026 13:25:18 +1000 Subject: [PATCH 2/3] fix: copy quiz banks into build output --- vite.config.ts | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/vite.config.ts b/vite.config.ts index 845b4fb..d449907 100644 --- a/vite.config.ts +++ b/vite.config.ts @@ -1,14 +1,38 @@ +import { cpSync, existsSync } from 'node:fs'; +import { resolve } from 'node:path'; import { defineConfig } from 'vite'; import react from '@vitejs/plugin-react'; import tailwindcss from '@tailwindcss/vite'; import { visualizer } from 'rollup-plugin-visualizer'; import type { Plugin } from 'vite'; +const copyQuizBanksPlugin = (): Plugin => { + let quizBanksSourceDir = ''; + let quizBanksOutputDir = ''; + + return { + name: 'copy-quiz-banks', + apply: 'build', + configResolved(config) { + quizBanksSourceDir = resolve(config.root, 'quiz-banks'); + quizBanksOutputDir = resolve(config.root, config.build.outDir, 'quiz-banks'); + }, + closeBundle() { + if (!existsSync(quizBanksSourceDir)) { + return; + } + + cpSync(quizBanksSourceDir, quizBanksOutputDir, { recursive: true }); + }, + }; +}; + // https://vite.dev/config/ export default defineConfig({ plugins: [ react(), tailwindcss(), + copyQuizBanksPlugin(), // Bundle analyzer - generates stats.html visualizer({ open: false, From 1c16c0a0e6059447d9fb9d710d1fab2b4ad3d5fe Mon Sep 17 00:00:00 2001 From: Mohammad Naim Faizy Date: Thu, 9 Apr 2026 13:35:44 +1000 Subject: [PATCH 3/3] fix: stabilize production preview chunking --- vite.config.ts | 45 +++++---------------------------------------- 1 file changed, 5 insertions(+), 40 deletions(-) diff --git a/vite.config.ts b/vite.config.ts index d449907..fc23a4a 
100644 --- a/vite.config.ts +++ b/vite.config.ts @@ -64,67 +64,32 @@ export default defineConfig({ rollupOptions: { output: { manualChunks(id) { - // Vendor chunks - split by library size if (id.includes('node_modules')) { - // Three.js and related (large library) if (id.includes('three')) { return 'vendor-three'; } - // Monaco Editor (large, playground-only) if (id.includes('monaco-editor')) { return 'vendor-monaco'; } - // React Flow / xyflow (playground-only) if (id.includes('@xyflow')) { return 'vendor-xyflow'; } - // React ecosystem (core libraries) - if (id.includes('react') || id.includes('react-dom') || id.includes('react-router')) { - return 'vendor-react'; - } - // Lucide icons (medium-sized) - if (id.includes('lucide-react')) { - return 'vendor-icons'; - } - // Other node_modules - return 'vendor-misc'; - } - // Feature-based chunks (lazy loaded via routes) - // These will be automatically split by Vite's dynamic imports - - // Shared components - split by type - if (id.includes('/src/components/shared/')) { - return 'shared-components'; + // Let Rollup group the rest of node_modules automatically. + // The previous manual vendor splitting created circular chunk dependencies + // that broke the production preview runtime. + return undefined; } - // Python 3D visualizations (heavy) if (id.includes('/src/components/models3d/python/')) { return 'python-3d'; } - // Other 3D visualizations if (id.includes('/src/components/models3d/')) { return 'models-3d'; } - // 2D visualizations - keep with features for better initial load - // No manual chunking needed, will be in feature bundles - - // Shared utilities - if (id.includes('/src/utils/')) { - return 'shared-utils'; - } - - // Context providers - if (id.includes('/src/shared/contexts/')) { - return 'shared-contexts'; - } - - // Hooks - if (id.includes('/src/hooks/') || id.includes('/src/shared/hooks/')) { - return 'shared-hooks'; - } + return undefined; }, }, },