625 lines
26 KiB
TypeScript
625 lines
26 KiB
TypeScript
import React, { useRef, useState, useEffect } from "react";
|
|
import { Check, BookOpen, BarChart3, Zap, Target } from "lucide-react";
|
|
import { PracticeFromDataset } from "../../../components/lessons/LessonShell";
|
|
import {
|
|
COMMAND_EVIDENCE_EASY,
|
|
COMMAND_EVIDENCE_MEDIUM,
|
|
} from "../../../data/rw/command-of-evidence";
|
|
import EvidenceHunterWidget, {
|
|
type EvidenceExercise,
|
|
} from "../../../components/lessons/EvidenceHunterWidget";
|
|
import RevealCardGrid, {
|
|
type RevealCard,
|
|
} from "../../../components/lessons/RevealCardGrid";
|
|
import useScrollReveal from "../../../components/lessons/useScrollReveal";
|
|
|
|
/** Props accepted by the Command of Evidence lesson page. */
interface LessonProps {
  // Invoked when the learner clicks the "Finish Lesson ✓" button at the end
  // of the Practice Questions section; optional so the lesson can also be
  // rendered standalone without completion tracking.
  onFinish?: () => void;
}
|
|
|
|
/* ── Data for RevealCardGrid widgets ── */
|
|
const ILLUSTRATION_TRAPS: RevealCard[] = [
|
|
{
|
|
label: "Wrong speaker",
|
|
content:
|
|
"The quotation features the correct idea but from a different character.",
|
|
},
|
|
{
|
|
label: "Right topic, wrong direction",
|
|
content:
|
|
"The quotation mentions the topic but doesn't illustrate the specific claim.",
|
|
},
|
|
{
|
|
label: "Too indirect",
|
|
content:
|
|
"The connection between quotation and claim requires too much inferential leaping.",
|
|
},
|
|
{
|
|
label: "Question marks",
|
|
content: 'A rhetorical question often cannot "illustrate" a direct claim.',
|
|
},
|
|
];
|
|
|
|
const VALIDITY_TYPES: RevealCard[] = [
|
|
{
|
|
label: "Valid / Necessary",
|
|
sublabel: "CORRECT on SAT",
|
|
content:
|
|
"Must be true given the evidence; the only logical conclusion. Example: If 14% are supershear events → 86% are not.",
|
|
},
|
|
{
|
|
label: "Possible / Speculative",
|
|
sublabel: "WRONG on SAT",
|
|
content:
|
|
'Might be true but the evidence doesn\'t require it. Example: "Researchers must want more funding" — not stated.',
|
|
},
|
|
{
|
|
label: "Contradicted",
|
|
sublabel: "WRONG on SAT",
|
|
content:
|
|
'Directly conflicts with information stated in the passage. Example: "Exercise improves fitness equally for all" — passage says otherwise.',
|
|
},
|
|
{
|
|
label: "Off-topic",
|
|
sublabel: "WRONG on SAT",
|
|
content:
|
|
"No logical connection to the claim or evidence. Example: Ocean temperature claim when passage is about land volcanoes.",
|
|
},
|
|
];
|
|
|
|
const QUANT_WRONG_ANSWERS: RevealCard[] = [
|
|
{
|
|
label: "Wrong subgroup / time period",
|
|
content: "Accurate data about the WRONG subgroup or time period.",
|
|
},
|
|
{
|
|
label: "Wrong direction",
|
|
content:
|
|
"Accurate comparison in the WRONG direction (A > B when claim needs B > A).",
|
|
},
|
|
{
|
|
label: "Wrong number of groups",
|
|
content: "Involves TWO groups when the claim is about ONE group only.",
|
|
},
|
|
{
|
|
label: "Contradictory trend",
|
|
content:
|
|
"Describes a trend that contradicts the claim despite accurate numbers.",
|
|
},
|
|
{
|
|
label: "Right data, wrong claim",
|
|
content:
|
|
"Describes the graph accurately but doesn't address the specific claim.",
|
|
},
|
|
];
|
|
|
|
const EVIDENCE_EXERCISES: EvidenceExercise[] = [
|
|
{
|
|
question:
|
|
"The researcher concludes that urban green spaces reduce stress. Which sentence from the study best SUPPORTS this conclusion?",
|
|
passage: [
|
|
"Participants were randomly assigned to walk for 30 minutes in either an urban park or a busy commercial district.",
|
|
"Before and after each walk, cortisol levels were measured using saliva samples.",
|
|
"Participants who walked in the park showed a 15% reduction in cortisol, a primary stress hormone.",
|
|
"Those who walked in the commercial district showed no significant change in cortisol levels.",
|
|
"Participants reported feeling calmer after the park walk, though self-report data is inherently subjective.",
|
|
],
|
|
evidenceIndex: 2,
|
|
explanation:
|
|
"Sentence 3 provides direct biological evidence (cortisol reduction) that supports the claim about stress reduction. It uses objective measurement rather than self-report, making it the strongest support for the stated conclusion.",
|
|
},
|
|
{
|
|
question:
|
|
"Which sentence from this passage most effectively ILLUSTRATES the claim that microplastics are now found in unexpected locations?",
|
|
passage: [
|
|
"Microplastics are plastic fragments smaller than 5 millimeters.",
|
|
"They originate from the breakdown of larger plastic items or are manufactured at microscopic size.",
|
|
"Researchers have detected microplastics in the peak snowpack of Mount Everest.",
|
|
"Microplastics have also been found in human blood, lung tissue, and placentas.",
|
|
"The long-term health effects of microplastic exposure are still being studied.",
|
|
],
|
|
evidenceIndex: 2,
|
|
explanation:
|
|
"Sentence 3 best illustrates the claim about unexpected locations because Mount Everest is one of the most remote places on Earth — finding microplastics there is a striking, concrete example of how pervasive contamination has become.",
|
|
},
|
|
];
|
|
|
|
/**
 * Command of Evidence lesson page (EBRW / Information & Ideas, Domain 2).
 *
 * Renders four full-height scroll sections — Textual Evidence, Quantitative
 * Evidence, the Evidence Hunter widget, and Practice Questions — plus a
 * fixed desktop sidebar whose markers track and control the active section.
 *
 * Props: `onFinish` is forwarded to the final "Finish Lesson" button.
 */
const EBRWCommandEvidenceLesson: React.FC<LessonProps> = ({ onFinish }) => {
  // Index (0-3) of the section currently highlighted in the sidebar.
  const [activeSection, setActiveSection] = useState(0);
  // One slot per <section>; populated via ref callbacks in the JSX below.
  const sectionsRef = useRef<(HTMLElement | null)[]>([]);

  // Scroll spy: one IntersectionObserver per section element. When a section
  // becomes >=30% visible it is marked active. Runs once on mount ([] deps),
  // so sections must be present on first render. If several sections
  // intersect at once, whichever observer fires last wins.
  useEffect(() => {
    const observers: IntersectionObserver[] = [];
    sectionsRef.current.forEach((el, idx) => {
      if (!el) return;
      const obs = new IntersectionObserver(
        ([entry]) => {
          if (entry.isIntersecting) setActiveSection(idx);
        },
        { threshold: 0.3 },
      );
      obs.observe(el);
      observers.push(obs);
    });
    // Cleanup: disconnect every observer on unmount.
    return () => observers.forEach((o) => o.disconnect());
  }, []);

  // Shared lessons hook; presumably animates the .scroll-reveal / stagger-*
  // elements used throughout the JSX below — TODO confirm against the hook.
  useScrollReveal();

  // Sidebar click handler: optimistically set the active marker, then smooth-
  // scroll the target section into view (the observer will also confirm it).
  const scrollToSection = (index: number) => {
    setActiveSection(index);
    sectionsRef.current[index]?.scrollIntoView({ behavior: "smooth" });
  };

  // Sidebar entry: numbered circle (checkmark once passed, icon otherwise)
  // plus the section title; clicking scrolls to the section.
  // NOTE(review): declared inside the component, so React sees a new
  // component type on every render and remounts these buttons each time.
  // Harmless while they hold no state, but consider hoisting it out and
  // passing activeSection/scrollToSection as props.
  const SectionMarker = ({
    index,
    title,
    icon: Icon,
  }: {
    index: number;
    title: string;
    icon: React.ComponentType<React.SVGProps<SVGSVGElement>>;
  }) => {
    const isActive = activeSection === index;
    const isPast = activeSection > index; // sections before the active one show a check
    return (
      <button
        onClick={() => scrollToSection(index)}
        className={`flex items-center gap-3 p-3 w-full rounded-lg text-left transition-all ${isActive ? "bg-teal-50" : "hover:bg-slate-50"}`}
      >
        <div
          className={`w-8 h-8 rounded-full flex items-center justify-center shrink-0
            ${isActive ? "bg-teal-600 text-white" : isPast ? "bg-teal-400 text-white" : "bg-slate-200 text-slate-500"}`}
        >
          {isPast ? (
            <Check className="w-4 h-4" />
          ) : (
            <Icon className="w-4 h-4" />
          )}
        </div>
        <p
          className={`text-sm font-bold ${isActive ? "text-teal-900" : "text-slate-600"}`}
        >
          {title}
        </p>
      </button>
    );
  };

  return (
    <div className="flex flex-col lg:flex-row min-h-screen">
      {/* Desktop-only fixed sidebar with the four section markers. */}
      <aside className="w-full lg:w-64 lg:fixed lg:top-14 lg:bottom-0 lg:overflow-y-auto p-4 border-r border-slate-200 bg-slate-50 z-0 hidden lg:block">
        <nav className="space-y-2 pt-6">
          <SectionMarker index={0} title="Textual Evidence" icon={BookOpen} />
          <SectionMarker
            index={1}
            title="Quantitative Evidence"
            icon={BarChart3}
          />
          <SectionMarker index={2} title="Evidence Hunter" icon={Target} />
          <SectionMarker index={3} title="Practice Questions" icon={Zap} />
        </nav>
      </aside>

      {/* Main column; lg:ml-64 offsets the fixed sidebar width. */}
      <div className="flex-1 lg:ml-64 md:p-12 max-w-full mx-auto">
        {/* Section 0 — Textual Evidence */}
        <section
          ref={(el) => {
            sectionsRef.current[0] = el;
          }}
          className="min-h-screen flex flex-col justify-center mb-24 pt-20 lg:pt-0"
        >
          <div className="inline-flex items-center gap-2 bg-teal-100 text-teal-700 px-3 py-1 rounded-full text-xs font-bold uppercase tracking-wider mb-4 w-fit">
            Information & Ideas — Domain 2
          </div>
          <h2 className="text-4xl font-extrabold text-slate-900 mb-2">
            Command of Evidence
          </h2>
          <p className="text-lg text-slate-500 mb-8">
            Move beyond the passage to apply its ideas. Two subtypes: Textual
            Evidence (quotations) and Quantitative Evidence (graphs and tables).
          </p>

          <div className="scroll-reveal stagger-1 bg-teal-50 border border-teal-200 rounded-2xl p-5 mb-8">
            <p className="text-sm text-slate-700">
              <span className="font-bold text-teal-800">Overview: </span>Command
              of Evidence questions ask you to move BEYOND the passage to apply
              its ideas. You will identify quotations or data that illustrate,
              support, or undermine a specific claim. There are two main
              subtypes: Textual Evidence (using quotations or passages) and
              Quantitative Evidence (using graphs and tables).
            </p>
          </div>

          {/* 5A — Illustrating a Claim: 3-step process + trap reveal cards. */}
          <div className="scroll-reveal stagger-2 rounded-2xl p-6 mb-8 bg-white border border-slate-200 space-y-4">
            <h3 className="text-lg font-bold text-slate-900">
              Illustrating a Claim (Quotation Selection)
            </h3>
            <p className="text-sm text-slate-600">
              These questions ask you to find the quotation from a poem, story,
              or passage that best illustrates a claim stated in the question
              stem. The claim is explicitly given to you — your job is to match
              it to the correct quotation.
            </p>
            <div className="bg-teal-50 border border-teal-200 rounded-xl p-4">
              <p className="font-bold text-teal-800 text-sm mb-2">
                3-Step Process for Illustration Questions
              </p>
              <div className="space-y-2">
                {/* Inline [stepNumber, text] pairs rendered as numbered rows. */}
                {[
                  [
                    "1",
                    "RESTATE the claim in the question stem in your own words. Identify the exact quality or action it describes.",
                  ],
                  [
                    "2",
                    "PREDICT what kind of language would illustrate it — positive/negative tone, specific action, direct statement?",
                  ],
                  [
                    "3",
                    "ELIMINATE quotations that: (a) are too vague, (b) refer to the wrong speaker, (c) describe a different quality entirely.",
                  ],
                ].map(([n, text]) => (
                  <div key={n} className="flex gap-2">
                    <span className="w-5 h-5 rounded-full bg-teal-600 text-white flex items-center justify-center text-xs font-bold shrink-0">
                      {n}
                    </span>
                    <p className="text-xs text-slate-700">{text}</p>
                  </div>
                ))}
              </div>
            </div>
            <p className="font-semibold text-sm text-slate-800">
              Key traps in illustration questions — tap to reveal:
            </p>
            <RevealCardGrid
              cards={ILLUSTRATION_TRAPS}
              columns={2}
              accentColor="teal"
            />
          </div>

          {/* 5B — Supporting a Claim: valid-support vs looks-like-support grid. */}
          <div className="scroll-reveal stagger-3 rounded-2xl p-6 mb-8 bg-white border border-slate-200 space-y-4">
            <h3 className="text-lg font-bold text-slate-900">
              Supporting a Claim
            </h3>
            <p className="text-sm text-slate-600">
              Support questions ask: "Which finding would MOST DIRECTLY support
              this conclusion?" The correct answer must provide new evidence
              consistent with the claim — it doesn't just repeat what the
              passage already states.
            </p>
            <div className="grid grid-cols-1 sm:grid-cols-2 gap-4">
              <div className="card-tilt bg-green-50 border border-green-200 rounded-xl p-4">
                <p className="font-bold text-green-800 text-sm mb-2">
                  What Makes a Valid Support?
                </p>
                <ul className="text-xs text-slate-600 space-y-1">
                  <li>
                    • Provides a NEW example or finding, not a restatement.
                  </li>
                  <li>
                    • Is directly consistent with the specific mechanism
                    described.
                  </li>
                  <li>• Makes the claim MORE likely to be true.</li>
                  <li>
                    • Common patterns: X causes Y → new example of X causing Y;
                    More X → more Y → find a case where less X → less Y.
                  </li>
                </ul>
              </div>
              <div className="card-tilt bg-red-50 border border-red-200 rounded-xl p-4">
                <p className="font-bold text-red-800 text-sm mb-2">
                  What Looks Like Support But Isn't
                </p>
                <ul className="text-xs text-slate-600 space-y-1">
                  <li>
                    • The answer discusses the right topic but a different
                    aspect of it.
                  </li>
                  <li>
                    • The answer is consistent with the general field but not
                    the specific claim.
                  </li>
                  <li>
                    • The answer only restates part of what the passage already
                    said.
                  </li>
                  <li>
                    • The answer is factually true but would also be true
                    regardless of the claim.
                  </li>
                </ul>
              </div>
            </div>
          </div>

          {/* 5C — Undermining a Claim: flip-the-claim technique + trap list. */}
          <div className="scroll-reveal stagger-4 rounded-2xl p-6 mb-8 bg-white border border-slate-200 space-y-4">
            <h3 className="text-lg font-bold text-slate-900">
              Undermining a Claim
            </h3>
            <p className="text-sm text-slate-600">
              Undermine questions have the same structure as support questions,
              but in reverse. The correct answer must provide information that
              makes the claim LESS likely to be true.
            </p>
            <div className="bg-amber-50 border border-amber-200 rounded-xl p-4">
              <p className="font-bold text-amber-800 text-sm mb-1">
                KEY TECHNIQUE — Flip the Claim
              </p>
              <p className="text-xs text-slate-700">
                If the claim is "high metabolic rate = survival advantage," then
                to undermine it you need evidence that high metabolic rate does
                NOT produce survival advantage (e.g., many high-metabolic
                creatures went extinct).
              </p>
            </div>
            <p className="font-semibold text-sm text-slate-800">
              Common undermine traps:
            </p>
            {/* Inline trap strings rendered as ✗-prefixed red rows. */}
            {[
              "The answer is unrelated to the claim rather than contradictory to it — an unrelated finding doesn't undermine anything.",
              "The answer challenges a secondary detail, not the core mechanism being tested.",
              "The answer actually supports the claim but is framed in negative-sounding language.",
            ].map((trap, i) => (
              <div
                key={i}
                className="flex gap-2 bg-red-50 border border-red-100 rounded-lg px-3 py-2"
              >
                <span className="text-red-500 font-bold shrink-0 text-xs">
                  ✗
                </span>
                <p className="text-xs text-slate-600">{trap}</p>
              </div>
            ))}
          </div>

          {/* 5D — Validity of Conclusions reveal cards. */}
          <div className="scroll-reveal stagger-5 rounded-2xl p-6 mb-8 bg-white border border-slate-200 space-y-4">
            <h3 className="text-lg font-bold text-slate-900">
              Validity of Conclusions — tap to reveal each type:
            </h3>
            <p className="text-sm text-slate-600">
              Some questions ask whether a finding is valid — whether it
              necessarily follows from the research described.
            </p>
            <RevealCardGrid
              cards={VALIDITY_TYPES}
              columns={2}
              accentColor="teal"
            />
          </div>

          {/* Section 0 closer: golden rule callout. */}
          <div className="scroll-reveal-scale golden-rule-glow bg-teal-900 text-white rounded-2xl p-5 mb-8">
            <p className="font-bold mb-1">Golden Rule — Textual Evidence</p>
            <p className="text-sm text-teal-100">
              The question always tells you the required relationship
              (illustrate / support / undermine). An answer that accurately
              quotes the passage but has the WRONG relationship is still wrong.
              Identify the relationship first, accuracy second.
            </p>
          </div>
        </section>

        {/* Section 1 — Quantitative Evidence */}
        <section
          ref={(el) => {
            sectionsRef.current[1] = el;
          }}
          className="min-h-screen flex flex-col justify-center mb-24"
        >
          <h2 className="text-4xl font-extrabold text-slate-900 mb-2">
            Quantitative Evidence
          </h2>
          <p className="text-lg text-slate-500 mb-8">
            Graphs and tables — the mandatory order of operations and criteria
            matching technique.
          </p>

          {/* 5E — Order of operations for graph questions + warning box. */}
          <div className="scroll-reveal stagger-1 rounded-2xl p-6 mb-8 bg-amber-50 border border-amber-200 space-y-4">
            <h3 className="text-lg font-bold text-amber-900">
              Non-Negotiable Order of Operations
            </h3>
            <p className="text-sm text-slate-700">
              The most important principle:{" "}
              <strong>the graphic alone is never sufficient.</strong> You must
              begin with the passage and the question to know what to look for
              in the graphic.
            </p>
            <div className="bg-white border border-amber-200 rounded-xl p-4">
              <p className="font-bold text-amber-800 text-sm mb-3">
                MANDATORY Order for Graph Questions
              </p>
              <div className="space-y-2">
                {/* Inline [stepNumber, text] pairs rendered as numbered rows. */}
                {[
                  [
                    "1",
                    "Read the PASSAGE (especially the last sentence — this states the claim).",
                  ],
                  [
                    "2",
                    "Read the QUESTION STEM carefully to identify exactly what you are being asked.",
                  ],
                  [
                    "3",
                    "Extract the CRITERIA from the claim: what specific conditions must be met?",
                  ],
                  [
                    "4",
                    "THEN look at the GRAPHIC with those criteria in mind.",
                  ],
                  [
                    "5",
                    "Match the answer that satisfies ALL criteria — not just part of them.",
                  ],
                ].map(([n, text]) => (
                  <div key={n} className="flex gap-2">
                    <span className="w-5 h-5 rounded-full bg-amber-600 text-white flex items-center justify-center text-xs font-bold shrink-0">
                      {n}
                    </span>
                    <p className="text-xs text-slate-700">{text}</p>
                  </div>
                ))}
              </div>
            </div>
            <div className="bg-red-50 border border-red-200 rounded-xl p-4">
              <p className="font-bold text-red-800 text-sm mb-1">
                CRITICAL WARNING
              </p>
              <p className="text-xs text-slate-700">
                Looking at the graph first is one of the most costly errors on
                graph questions. Multiple answer choices will accurately
                describe the graph — only ONE will match the specific claim in
                the passage. The graphic alone cannot tell you which one is
                correct.
              </p>
            </div>
          </div>

          {/* 5F — When the graph itself isn't needed. */}
          <div className="scroll-reveal stagger-2 rounded-2xl p-6 mb-8 bg-white border border-slate-200 space-y-3">
            <h3 className="text-lg font-bold text-slate-900">
              When You Do NOT Need the Graph
            </h3>
            <p className="text-sm text-slate-600">
              Many graph questions can be answered using only the passage and
              the answer choices — without looking at the graph at all.
            </p>
            <ul className="space-y-2 text-sm text-slate-700">
              <li className="flex gap-2">
                <span className="text-teal-600 font-bold shrink-0">•</span>If
                answer choices contain wording clearly inconsistent with the
                passage's claim, eliminate them immediately.
              </li>
              <li className="flex gap-2">
                <span className="text-teal-600 font-bold shrink-0">•</span>
                Answers addressing the wrong aspect of the claim (wrong time
                period, wrong variable, wrong group) can be eliminated before
                consulting the graph.
              </li>
              <li className="flex gap-2">
                <span className="text-teal-600 font-bold shrink-0">•</span>Once
                only one answer remains that is consistent with the claim, that
                is correct — verifying against the graph is optional.
              </li>
            </ul>
            <div className="bg-teal-50 border border-teal-200 rounded-xl p-4">
              <p className="font-bold text-teal-800 text-xs mb-1">EXAMPLE</p>
              <p className="text-xs text-slate-700">
                Passage claims "print books are preferred in certain
                situations." Any answer describing a situation where e-books are
                preferred can be immediately eliminated — without looking at the
                chart.
              </p>
            </div>
          </div>

          {/* 5G — Criteria matching + wrong-answer reveal cards. */}
          <div className="scroll-reveal stagger-3 rounded-2xl p-6 mb-8 bg-white border border-slate-200 space-y-4">
            <h3 className="text-lg font-bold text-slate-900">
              Criteria Matching for Quantitative Questions
            </h3>
            <p className="text-sm text-slate-600">
              The most common error: choosing an answer that accurately
              describes the graph but fails to match ALL criteria specified in
              the claim. Build a checklist before looking at the data.
            </p>
            <div className="card-tilt bg-teal-50 border border-teal-200 rounded-xl p-4">
              <p className="font-bold text-teal-800 text-sm mb-2">
                Building a Criteria Checklist
              </p>
              <ul className="text-xs text-slate-600 space-y-1">
                <li>• List every specific condition mentioned in the claim.</li>
                <li>
                  • Example: "rebounded AND reached highest level in 60 years" =
                  2 separate criteria.
                </li>
                <li>
                  • An answer meeting only Criterion 1 but not Criterion 2 is
                  wrong, even if it accurately describes the graph.
                </li>
                <li>
                  • Write criteria on scratch paper before looking at graphic.
                </li>
              </ul>
            </div>
            <p className="font-semibold text-sm text-slate-800">
              Common Quantitative Wrong Answers — tap to reveal:
            </p>
            <RevealCardGrid
              cards={QUANT_WRONG_ANSWERS}
              columns={3}
              accentColor="teal"
            />
          </div>

          {/* Section 1 closer: golden rule callout. */}
          <div className="scroll-reveal-scale golden-rule-glow bg-teal-900 text-white rounded-2xl p-5 mb-8">
            <p className="font-bold mb-1">
              Golden Rule — Quantitative Evidence
            </p>
            <p className="text-sm text-teal-100">
              Always read the passage and question first to extract criteria.
              Multiple answer choices will accurately describe the graph — only
              ONE matches the specific claim. "Right data, wrong claim aspect"
              is the most common wrong answer type.
            </p>
          </div>
        </section>

        {/* Section 2 — Evidence Hunter widget */}
        <section
          ref={(el) => {
            sectionsRef.current[2] = el;
          }}
          className="min-h-screen flex flex-col justify-center mb-24"
        >
          <h2 className="text-4xl font-extrabold text-slate-900 mb-2">
            Evidence Hunter
          </h2>
          <p className="text-lg text-slate-500 mb-8">
            Find the sentence that has the exact relationship the question
            requires.
          </p>
          <EvidenceHunterWidget
            exercises={EVIDENCE_EXERCISES}
            accentColor="teal"
          />
        </section>

        {/* Section 3 — Practice: 2 easy + 1 medium dataset questions. */}
        <section
          ref={(el) => {
            sectionsRef.current[3] = el;
          }}
          className="min-h-screen flex flex-col justify-center mb-24"
        >
          <h2 className="text-4xl font-extrabold text-slate-900 mb-6">
            Practice Questions
          </h2>
          {COMMAND_EVIDENCE_EASY.slice(0, 2).map((q) => (
            <PracticeFromDataset key={q.id} question={q} color="teal" />
          ))}
          {COMMAND_EVIDENCE_MEDIUM.slice(0, 1).map((q) => (
            <PracticeFromDataset key={q.id} question={q} color="teal" />
          ))}
          <div className="mt-8 text-center">
            {/* onFinish may be undefined; React accepts an undefined handler. */}
            <button
              onClick={onFinish}
              className="px-6 py-3 bg-teal-900 text-white font-bold rounded-full hover:bg-teal-700 transition-colors"
            >
              Finish Lesson ✓
            </button>
          </div>
        </section>
      </div>
    </div>
  );
};

export default EBRWCommandEvidenceLesson;
|