// EBRW Reading & Writing lesson page: "Inferences" (Information & Ideas — Domain 2).
// NOTE(review): this file appears extraction-damaged and is preserved byte-identical below;
// only comments are added. Specifically:
//   1. The JSX element tags inside the component's return body have been stripped — only text
//      children, {/* section banner */} comments, and {expression} blocks remain. The original
//      markup is NOT recoverable from this file; do not attempt a behavior-preserving rewrite.
//   2. Several double-quoted string literals are split across physical lines (raw newlines
//      inside "…"), which is invalid TypeScript as rendered here (see the INFERENCE_TYPES
//      "Real-world true" card, the first EVIDENCE_EXERCISES explanation, and pattern 4's rule).
//   3. Imports such as Check/BookOpen/Lightbulb/Zap/Target, PracticeFromDataset,
//      EvidenceHunterWidget, and RevealCardGrid look unused only because the JSX that
//      referenced them was stripped — do not delete them.
import React, { useRef, useState, useEffect } from "react"; import { Check, BookOpen, Lightbulb, Zap, Target } from "lucide-react"; import { PracticeFromDataset } from "../../../components/lessons/LessonShell"; import { INFERENCES_EASY, INFERENCES_MEDIUM, } from "../../../data/rw/inferences"; import EvidenceHunterWidget, { type EvidenceExercise, } from "../../../components/lessons/EvidenceHunterWidget"; import RevealCardGrid, { type RevealCard, } from "../../../components/lessons/RevealCardGrid"; import useScrollReveal from "../../../components/lessons/useScrollReveal"; interface LessonProps { onFinish?: () => void; } /* ── Data for RevealCardGrid widgets ── */ const INFERENCE_TYPES: RevealCard[] = [ { label: "Valid / Necessary", sublabel: "CORRECT on SAT", content: "Must be true if the stated evidence is true. Example: If 14% of earthquakes are supershear → 86% are NOT supershear.", }, { label: "Possible / Speculative", sublabel: "WRONG on SAT", content: 'Could be true, but might not be; goes further than the evidence. Example: "Researchers must want more funding" — not stated.', }, { label: "Contradicted", sublabel: "WRONG on SAT", content: 'Directly conflicts with stated evidence. Example: "Exercise improves fitness equally for all" when passage says otherwise.', }, { label: "Real-world true but unsupported", sublabel: "WRONG on SAT", content: "True in reality but not implied by the passage. 
The SAT only rewards what the text guarantees.", }, { label: "Half-valid", sublabel: "WRONG on SAT", content: "First part follows from the evidence, but the second part goes beyond what the evidence requires.", }, ]; /* RevealCard data: common mistakes when following multi-step logical chains (section 6D below). */ const CHAIN_ERRORS: RevealCard[] = [ { label: "Stopping too early", content: 'Stopping at the finding rather than the conclusion (e.g., stopping at "feldspar was found" rather than following its implication for the competing theories).', }, { label: "Skipping a step", content: "Jumping to a conclusion that goes further than the chain allows by skipping an intermediate logical step.", }, { label: "Scope confusion", content: "The chain applies to one specific region or group, but the answer makes a universal claim about all regions or groups.", }, { label: "Time assumption", content: "The evidence describes a current or past state, and the answer makes a claim about a future state without justification.", }, ]; /* EvidenceHunterWidget data — evidenceIndex is the 0-based index into passage[] of the key sentence (index 3 → "sentence 4" in each explanation). */ const EVIDENCE_EXERCISES: EvidenceExercise[] = [ { question: "Based on the passage, which sentence provides the strongest basis for inferring that Dr. Patel's research contradicts established scientific consensus?", passage: [ "Dr. Patel has spent fifteen years studying migration patterns of monarch butterflies.", "Her field stations span the entire North American flyway, from Canada to Mexico.", "She has published 47 peer-reviewed papers, each building on data collected across multiple seasons.", "Her most recent paper challenges the assumption that butterflies navigate primarily by magnetic fields.", "Instead, she proposes that polarized light plays a more significant role than previously recognized.", ], evidenceIndex: 3, explanation: 'Sentence 4 is the basis for inferring contradiction with consensus. The word "challenges" indicates Dr. Patel is actively contradicting an established "assumption." 
This makes the inference valid — not speculation — because the passage directly states she is challenging the field.', }, { question: "Which sentence provides the strongest basis for inferring that the plastic bag ban had unintended consequences?", passage: [ "In 2015, the city council banned plastic bags at all grocery stores.", "The ban was intended to reduce plastic waste in local waterways.", "Plastic bag litter in rivers decreased by 40% in the first year.", "However, sales of heavier-gauge trash bags increased by 350% over the same period.", "Environmental analysts noted that thick trash bags contain more plastic by weight than the thin bags they replaced.", ], evidenceIndex: 3, explanation: "Sentence 4 is the evidence of the unintended consequence — a massive surge in heavier plastic bag purchases. Combined with sentence 5, it implies total plastic use may have increased, the opposite of the ban's stated goal.", }, ]; /* Lesson page component. NOTE(review): React.FC carries no <LessonProps> type argument, so onFinish is implicitly any and the LessonProps interface above is unused — TODO confirm intended typing; onFinish is destructured but not referenced in the visible (stripped) body. */ const EBRWInferencesLesson: React.FC = ({ onFinish }) => { const [activeSection, setActiveSection] = useState(0); const sectionsRef = useRef<(HTMLElement | null)[]>([]); /* One IntersectionObserver per registered section (threshold 0.3) marks a section active when it scrolls into view; every observer is disconnected in the effect's cleanup. */ useEffect(() => { const observers: IntersectionObserver[] = []; sectionsRef.current.forEach((el, idx) => { if (!el) return; const obs = new IntersectionObserver( ([entry]) => { if (entry.isIntersecting) setActiveSection(idx); }, { threshold: 0.3 }, ); obs.observe(el); observers.push(obs); }); return () => observers.forEach((o) => o.disconnect()); }, []); useScrollReveal(); /* Marks a section active immediately and smooth-scrolls its element into view. */ const scrollToSection = (index: number) => { setActiveSection(index); sectionsRef.current[index]?.scrollIntoView({ behavior: "smooth" }); }; /* Per-section nav marker; NOTE(review): its returned JSX was lost in extraction (empty "return ( )"), and it is not referenced in the visible body — presumably used by the stripped markup. */ const SectionMarker = ({ index, title, icon: Icon, }: { index: number; title: string; icon: React.ElementType; }) => { const isActive = activeSection === index; const isPast = activeSection > index; return ( ); }; return (
// NOTE(review): from here to the closing ");" is the component's JSX with all element tags
// stripped by extraction — four sections are still identifiable via the sectionsRef.current[0..3]
// ref callbacks and the {/* Section N */} banners. Left untouched.
{/* Section 0 — Valid Inferences */}
{ sectionsRef.current[0] = el; }} className="min-h-screen flex flex-col justify-center mb-24 pt-20 lg:pt-0" >
Information & Ideas — Domain 2

Inferences

A valid inference MUST be true based on the passage — not merely possible, plausible, or consistent.

{/* Core Concept — now with RevealCardGrid */}

The Core Concept — What Makes an Inference Valid?

A valid inference is a statement that MUST be true if the evidence is true. It cannot merely be likely, plausible, or consistent. The SAT rewards only conclusions that necessarily follow from what is stated.

{/* 3-Step Text Completion Process */}

The 3-Step Text Completion Process

Text completion questions present a short passage followed by a blank at the end, asking "Which choice most logically completes the text?" The correct answer NECESSARILY follows from the stated evidence — not merely the most interesting or plausible continuation.

Universal Method for All Inference Questions

{[ [ "1", "IDENTIFY the key claim or evidence (usually in the 1–2 sentences before the blank). Write it in 3–6 words on scratch paper.", ], [ "2", 'WORK OUT the implication: "If this is true, what MUST also be true?" Do this BEFORE looking at answers. Even one word helps.', ], [ "3", "MATCH the answer: look for the option that says the same thing as your implication, possibly using different words, negation, or synonyms.", ], ].map(([n, text]) => (
{n}

{text}

))}

WHY THIS MATTERS

If you look at answer choices before working out the implication, you are vulnerable to speculation traps — answers that sound plausible because they extend the idea in a reasonable direction, but go further than the evidence requires.

{/* Section 1 — Inference Patterns */}
{ sectionsRef.current[1] = el; }} className="min-h-screen flex flex-col justify-center mb-24" >

Inference Patterns

The four core patterns, speculation traps, double negatives, and multi-step chains.

{/* 6A — 4 Core Patterns */}

The Four Core Valid Inference Patterns

{[ { num: 1, pattern: "Negation / Contrapositive", rule: "If only X has property Y, then everything that is not-X must lack property Y. This works in both directions.", examples: [ "If 14% of earthquakes are supershear events → 86% are NOT supershear events.", "If a phenomenon occurs ONLY during slow-wave sleep → it does NOT occur during REM sleep.", "If researchers focused mostly on land → most data comes from land, therefore they may have undercounted non-land events.", ], }, { num: 2, pattern: "Relative Comparison", rule: "If X is more than Y, you can restate this as Y is less than X. If X is the most, you can infer that all others are less than X.", examples: [ "If muscular contractions when lowering weights are MOST effective → contractions when raising weights are LESS effective.", "If big brown bats emit the most cries → all other species emit fewer cries.", "Restatement direction: if the claim is A > B, a valid answer might say B < A.", ], }, { num: 3, pattern: "Logical Elimination", rule: "When possibilities are listed and most are ruled out, the remaining possibility is the valid inference.", examples: [ "Researchers studied supershear earthquakes mostly on land → they did not study underwater earthquakes → many supershear earthquakes likely occur underwater.", "Two theories for Mars's crust: (a) magma ocean or (b) different origin. If feldspar (associated with b) is found → theory (a) alone does not explain the crust.", ], }, { num: 4, pattern: "Causal / Consequential Extension", rule: "If X causes Y, then applying X to a new situation should produce Y. 
Removing X should reduce Y.", examples: [ "If replay during slow-wave sleep consolidates memory → dancers who sleep several hours right after learning a routine should remember it better two weeks later.", "If hyperglycemia (high blood sugar) reduces exercise response → a drug that lowers blood sugar should improve exercise response.", "Apply the mechanism to a new group, new time period, or new scenario — but STAY within the same causal framework.", ], }, ].map((p) => (
{p.num}

{p.pattern}

{p.rule}

    {p.examples.map((ex, i) => (
  • • {ex}
  • ))}
))}
{/* 6B — Speculation Traps */}

Speculation Traps

Speculation traps are the most common wrong answer type in text completions. They go one plausible step too far beyond what the evidence necessarily implies.

How to Identify a Speculation Trap

  • • The answer could be true but doesn't have to be.
  • • The answer introduces a new assumption not grounded in the passage.
  • • The answer requires you to imagine a scenario the passage doesn't describe.
  • • The answer goes from "this happens" to "this is the best/only/most effective way."

Worked Example

Evidence: Sea turtle conservation focuses on protecting hatchlings after they emerge.

  • ✓ VALID: Conservation focuses less on hatchlings before they emerge.
  • ✗ TRAP: Protecting hatchlings after emergence is the only effective method.
  • ✗ TRAP: Pre-emergence protection is more effective.

RULE

The word "only" in an answer choice is almost always a sign of over-speculation. Very few things on the SAT are literally "the only way." Be very suspicious of absolute claims in answer choices.

{/* 6C — Double Negatives & Second Meanings */}

Double Negatives and Second Meanings in Answer Choices

Inference answer choices frequently use double negatives (which create positive meanings) or words in their second meanings. These are designed to make correct answers look wrong to careless readers.

Double Negative Translations

  • • "Not impossible" = possible
  • • "Not unimportant" = important
  • • "Not unlike" = similar
  • • "Less harmful" = milder, but still harmful
  • • Decode completely before evaluating the answer.
  • Strategy: Replace "not un-X" with "X" and "not im-X" with "X".

Second Meanings to Know

  • • "Qualify" = limit the scope of a claim (not: meet requirements)
  • • "Sound" = valid, reliable (not: audio)
  • • "Check" = restrain, control (not: verify)
  • • "Economy" = thrift, efficiency (not: the financial system)
  • • "Reserve" = hold off on (not: book in advance)
  • When a simple common word appears as an answer, suspect a second meaning.
{/* 6D — Multi-Step Logical Chains */}

Multi-Step Logical Chains

Some text completion questions require you to follow 2–3 logical steps before arriving at the conclusion. Students most often lose points here by stopping one step too early or introducing an extra assumption.

Multi-Step Chain: Worked Example (Mars Crust)

Step 1: Two theories exist for the first Martian crust: (a) all-encompassing magma ocean, (b) different origin with high silica.

Step 2: Researchers find feldspar (associated with high-silica lava flows) in 9 locations on Mars.

Step 3: Feldspar is evidence for Theory (b) — different origin with high silica.

Step 4: If Theory (b) accounts for some locations, Theory (a) (magma ocean) could not have been ALL-ENCOMPASSING.

Valid Conclusion: The magma ocean was not all-encompassing.

Speculation Trap: "Portions of Mars' surface were never covered by a crust." (no evidence for this)

Common errors in multi-step chains:

{/* 6E — Scratch Paper */}

Using Scratch Paper for Inference Questions

For text completions, scratch paper is not optional — it is essential. Writing down even a brief summary of the key claim and your predicted answer protects you from being seduced by plausible-sounding wrong answers.

What to Write

  • • 3–6 word summary of the key claim.
  • • Arrow indicating direction: "X → Y" or "less X = more Y".
  • • Your predicted answer in 3–5 words before looking at choices.
  • • For multi-step chains: number each step (1) → (2) → (3).

What This Prevents

  • • Choosing a speculative answer because it sounded good.
  • • Forgetting the specific constraint after reading 4 answer choices.
  • • Losing track of the logical chain halfway through.
  • • Selecting half-valid answers that address only part of the claim.

Golden Rule

Test every answer with: "Does the passage GUARANTEE this is true?" If you can imagine a scenario where the passage is correct but this answer is still false, eliminate it. Only the answer that MUST be true is correct. "Only," "best," and "most effective" in answer choices = almost always over-speculation.

{/* Section 2 — Inference Tracker widget */}
{ sectionsRef.current[2] = el; }} className="min-h-screen flex flex-col justify-center mb-24" >

Inference Tracker

Click the sentence that provides the strongest basis for the inference. Which sentence GUARANTEES the conclusion?

{/* Section 3 — Practice */}
{ sectionsRef.current[3] = el; }} className="min-h-screen flex flex-col justify-center mb-24" >

Practice Questions

{INFERENCES_EASY.slice(0, 2).map((q) => ( ))} {INFERENCES_MEDIUM.slice(0, 1).map((q) => ( ))}
); }; export default EBRWInferencesLesson;