// ─────────────────────────────────────────────────────────────────────────────
// NOTE(review): SAT "Command of Evidence" (Reading & Writing, Domain 2) lesson
// page. What this file defines (all visible in the code below):
//   • LessonProps              — optional onFinish callback for the lesson shell.
//   • ILLUSTRATION_TRAPS       — RevealCard[] data: traps in quotation-selection
//                                ("illustrate the claim") questions.
//   • VALIDITY_TYPES           — RevealCard[] data: valid vs. speculative vs.
//                                contradicted vs. off-topic conclusions.
//   • QUANT_WRONG_ANSWERS      — RevealCard[] data: wrong-answer patterns for
//                                graph/table (quantitative evidence) questions.
//   • EVIDENCE_EXERCISES       — EvidenceExercise[] data for EvidenceHunterWidget
//                                (passage sentences + index of the correct one).
//   • EBRWCommandEvidenceLesson — the page component: tracks the section in view
//                                via one IntersectionObserver per section
//                                (threshold 0.3, disconnected on unmount),
//                                offers smooth-scroll navigation, and renders
//                                four sections (textual evidence, quantitative
//                                evidence, Evidence Hunter widget, practice
//                                questions from the COMMAND_EVIDENCE datasets).
//
// NOTE(review): this file appears CORRUPTED — it looks like every tag-shaped
// `<…>` sequence was stripped (e.g. by an HTML-tag-removal pass). Evidence:
//   • Between `return (` and the closing `); };` only JSX text content,
//     `{…}` expression containers, and `{/* … */}` comments remain — no
//     element tags at all, so the component cannot compile or render as-is.
//   • `React.FC` is missing its `<LessonProps>` argument (the destructured
//     `{ onFinish }` prop suggests it was there), and
//     `icon: React.ComponentType>` has a stray `>` where its type argument
//     was removed; `SectionMarker` returns an empty `( )`.
//   • Two double-quoted strings are split across physical lines (the
//     "Off-topic" content in VALIDITY_TYPES and the first explanation in
//     EVIDENCE_EXERCISES) — raw newlines inside "…" are syntax errors;
//     presumably an artifact of the same mangling. TODO: confirm against VCS.
// Do NOT hand-reconstruct the markup from this copy — restore the
// last-known-good version from version control. The data constants above the
// component look intact (apart from the two split strings) and can be diffed
// against the restored file.
// ─────────────────────────────────────────────────────────────────────────────
import React, { useRef, useState, useEffect } from "react"; import { Check, BookOpen, BarChart3, Zap, Target } from "lucide-react"; import { PracticeFromDataset } from "../../../components/lessons/LessonShell"; import { COMMAND_EVIDENCE_EASY, COMMAND_EVIDENCE_MEDIUM, } from "../../../data/rw/command-of-evidence"; import EvidenceHunterWidget, { type EvidenceExercise, } from "../../../components/lessons/EvidenceHunterWidget"; import RevealCardGrid, { type RevealCard, } from "../../../components/lessons/RevealCardGrid"; import useScrollReveal from "../../../components/lessons/useScrollReveal"; interface LessonProps { onFinish?: () => void; } /* ── Data for RevealCardGrid widgets ── */ const ILLUSTRATION_TRAPS: RevealCard[] = [ { label: "Wrong speaker", content: "The quotation features the correct idea but from a different character.", }, { label: "Right topic, wrong direction", content: "The quotation mentions the topic but doesn't illustrate the specific claim.", }, { label: "Too indirect", content: "The connection between quotation and claim requires too much inferential leaping.", }, { label: "Question marks", content: 'A rhetorical question often cannot "illustrate" a direct claim.', }, ]; const VALIDITY_TYPES: RevealCard[] = [ { label: "Valid / Necessary", sublabel: "CORRECT on SAT", content: "Must be true given the evidence; the only logical conclusion. Example: If 14% are supershear events → 86% are not.", }, { label: "Possible / Speculative", sublabel: "WRONG on SAT", content: 'Might be true but the evidence doesn\'t require it. Example: "Researchers must want more funding" — not stated.', }, { label: "Contradicted", sublabel: "WRONG on SAT", content: 'Directly conflicts with information stated in the passage. Example: "Exercise improves fitness equally for all" — passage says otherwise.', }, { label: "Off-topic", sublabel: "WRONG on SAT", content: "No logical connection to the claim or evidence. 
Example: Ocean temperature claim when passage is about land volcanoes.", }, ]; const QUANT_WRONG_ANSWERS: RevealCard[] = [ { label: "Wrong subgroup / time period", content: "Accurate data about the WRONG subgroup or time period.", }, { label: "Wrong direction", content: "Accurate comparison in the WRONG direction (A > B when claim needs B > A).", }, { label: "Wrong number of groups", content: "Involves TWO groups when the claim is about ONE group only.", }, { label: "Contradictory trend", content: "Describes a trend that contradicts the claim despite accurate numbers.", }, { label: "Right data, wrong claim", content: "Describes the graph accurately but doesn't address the specific claim.", }, ]; const EVIDENCE_EXERCISES: EvidenceExercise[] = [ { question: "The researcher concludes that urban green spaces reduce stress. Which sentence from the study best SUPPORTS this conclusion?", passage: [ "Participants were randomly assigned to walk for 30 minutes in either an urban park or a busy commercial district.", "Before and after each walk, cortisol levels were measured using saliva samples.", "Participants who walked in the park showed a 15% reduction in cortisol, a primary stress hormone.", "Those who walked in the commercial district showed no significant change in cortisol levels.", "Participants reported feeling calmer after the park walk, though self-report data is inherently subjective.", ], evidenceIndex: 2, explanation: "Sentence 3 provides direct biological evidence (cortisol reduction) that supports the claim about stress reduction. 
It uses objective measurement rather than self-report, making it the strongest support for the stated conclusion.", }, { question: "Which sentence from this passage most effectively ILLUSTRATES the claim that microplastics are now found in unexpected locations?", passage: [ "Microplastics are plastic fragments smaller than 5 millimeters.", "They originate from the breakdown of larger plastic items or are manufactured at microscopic size.", "Researchers have detected microplastics in the peak snowpack of Mount Everest.", "Microplastics have also been found in human blood, lung tissue, and placentas.", "The long-term health effects of microplastic exposure are still being studied.", ], evidenceIndex: 2, explanation: "Sentence 3 best illustrates the claim about unexpected locations because Mount Everest is one of the most remote places on Earth — finding microplastics there is a striking, concrete example of how pervasive contamination has become.", }, ]; const EBRWCommandEvidenceLesson: React.FC = ({ onFinish }) => { const [activeSection, setActiveSection] = useState(0); const sectionsRef = useRef<(HTMLElement | null)[]>([]); useEffect(() => { const observers: IntersectionObserver[] = []; sectionsRef.current.forEach((el, idx) => { if (!el) return; const obs = new IntersectionObserver( ([entry]) => { if (entry.isIntersecting) setActiveSection(idx); }, { threshold: 0.3 }, ); obs.observe(el); observers.push(obs); }); return () => observers.forEach((o) => o.disconnect()); }, []); useScrollReveal(); const scrollToSection = (index: number) => { setActiveSection(index); sectionsRef.current[index]?.scrollIntoView({ behavior: "smooth" }); }; const SectionMarker = ({ index, title, icon: Icon, }: { index: number; title: string; icon: React.ComponentType>; }) => { const isActive = activeSection === index; const isPast = activeSection > index; return ( ); }; return (
{/* Section 0 — Textual Evidence */}
{ sectionsRef.current[0] = el; }} className="min-h-screen flex flex-col justify-center mb-24 pt-20 lg:pt-0" >
Information & Ideas — Domain 2

Command of Evidence

Move beyond the passage to apply its ideas. Two subtypes: Textual Evidence (quotations) and Quantitative Evidence (graphs and tables).

Overview: Command of Evidence questions ask you to move BEYOND the passage to apply its ideas. You will identify quotations or data that illustrate, support, or undermine a specific claim. There are two main subtypes: Textual Evidence (using quotations or passages) and Quantitative Evidence (using graphs and tables).

{/* 5A */}

Illustrating a Claim (Quotation Selection)

These questions ask you to find the quotation from a poem, story, or passage that best illustrates a claim stated in the question stem. The claim is explicitly given to you — your job is to match it to the correct quotation.

3-Step Process for Illustration Questions

{[ [ "1", "RESTATE the claim in the question stem in your own words. Identify the exact quality or action it describes.", ], [ "2", "PREDICT what kind of language would illustrate it — positive/negative tone, specific action, direct statement?", ], [ "3", "ELIMINATE quotations that: (a) are too vague, (b) refer to the wrong speaker, (c) describe a different quality entirely.", ], ].map(([n, text]) => (
{n}

{text}

))}

Key traps in illustration questions — tap to reveal:

{/* 5B */}

Supporting a Claim

Support questions ask: "Which finding would MOST DIRECTLY support this conclusion?" The correct answer must provide new evidence consistent with the claim — it doesn't just repeat what the passage already states.

What Makes a Valid Support?

  • • Provides a NEW example or finding, not a restatement.
  • • Is directly consistent with the specific mechanism described.
  • • Makes the claim MORE likely to be true.
  • • Common patterns: X causes Y → new example of X causing Y; More X → more Y → find a case where less X → less Y.

What Looks Like Support But Isn't

  • • The answer discusses the right topic but a different aspect of it.
  • • The answer is consistent with the general field but not the specific claim.
  • • The answer only restates part of what the passage already said.
  • • The answer is factually true but would also be true regardless of the claim.
{/* 5C */}

Undermining a Claim

Undermine questions have the same structure as support questions, but in reverse. The correct answer must provide information that makes the claim LESS likely to be true.

KEY TECHNIQUE — Flip the Claim

If the claim is "high metabolic rate = survival advantage," then to undermine it you need evidence that high metabolic rate does NOT produce survival advantage (e.g., many high-metabolic creatures went extinct).

Common undermine traps:

{[ "The answer is unrelated to the claim rather than contradictory to it — an unrelated finding doesn't undermine anything.", "The answer challenges a secondary detail, not the core mechanism being tested.", "The answer actually supports the claim but is framed in negative-sounding language.", ].map((trap, i) => (

{trap}

))}
{/* 5D */}

Validity of Conclusions — tap to reveal each type:

Some questions ask whether a finding is valid — whether it necessarily follows from the research described.

Golden Rule — Textual Evidence

The question always tells you the required relationship (illustrate / support / undermine). An answer that accurately quotes the passage but has the WRONG relationship is still wrong. Identify the relationship first, accuracy second.

{/* Section 1 — Quantitative Evidence */}
{ sectionsRef.current[1] = el; }} className="min-h-screen flex flex-col justify-center mb-24" >

Quantitative Evidence

Graphs and tables — the mandatory order of operations and criteria matching technique.

{/* 5E */}

Non-Negotiable Order of Operations

The most important principle:{" "} the graphic alone is never sufficient. You must begin with the passage and the question to know what to look for in the graphic.

MANDATORY Order for Graph Questions

{[ [ "1", "Read the PASSAGE (especially the last sentence — this states the claim).", ], [ "2", "Read the QUESTION STEM carefully to identify exactly what you are being asked.", ], [ "3", "Extract the CRITERIA from the claim: what specific conditions must be met?", ], [ "4", "THEN look at the GRAPHIC with those criteria in mind.", ], [ "5", "Match the answer that satisfies ALL criteria — not just part of them.", ], ].map(([n, text]) => (
{n}

{text}

))}

CRITICAL WARNING

Looking at the graph first is one of the most costly errors on graph questions. Multiple answer choices will accurately describe the graph — only ONE will match the specific claim in the passage. The graphic alone cannot tell you which one is correct.

{/* 5F */}

When You Do NOT Need the Graph

Many graph questions can be answered using only the passage and the answer choices — without looking at the graph at all.

  • If answer choices contain wording clearly inconsistent with the passage's claim, eliminate them immediately.
  • Answers addressing the wrong aspect of the claim (wrong time period, wrong variable, wrong group) can be eliminated before consulting the graph.
  • Once only one answer remains that is consistent with the claim, that is correct — verifying against the graph is optional.

EXAMPLE

Passage claims "print books are preferred in certain situations." Any answer describing a situation where e-books are preferred can be immediately eliminated — without looking at the chart.

{/* 5G */}

Criteria Matching for Quantitative Questions

The most common error: choosing an answer that accurately describes the graph but fails to match ALL criteria specified in the claim. Build a checklist before looking at the data.

Building a Criteria Checklist

  • • List every specific condition mentioned in the claim.
  • • Example: "rebounded AND reached highest level in 60 years" = 2 separate criteria.
  • • An answer meeting only Criterion 1 but not Criterion 2 is wrong, even if it accurately describes the graph.
  • • Write criteria on scratch paper before looking at graphic.

Common Quantitative Wrong Answers — tap to reveal:

Golden Rule — Quantitative Evidence

Always read the passage and question first to extract criteria. Multiple answer choices will accurately describe the graph — only ONE matches the specific claim. "Right data, wrong claim aspect" is the most common wrong answer type.

{/* Section 2 — Evidence Hunter widget */}
{ sectionsRef.current[2] = el; }} className="min-h-screen flex flex-col justify-center mb-24" >

Evidence Hunter

Find the sentence that has the exact relationship the question requires.

{/* Section 3 — Practice */}
{ sectionsRef.current[3] = el; }} className="min-h-screen flex flex-col justify-center mb-24" >

Practice Questions

{COMMAND_EVIDENCE_EASY.slice(0, 2).map((q) => ( ))} {COMMAND_EVIDENCE_MEDIUM.slice(0, 1).map((q) => ( ))}
); }; export default EBRWCommandEvidenceLesson;