diff --git a/.github/workflows/build-cv.yml b/.github/workflows/build-cv.yml new file mode 100644 index 0000000..bb01164 --- /dev/null +++ b/.github/workflows/build-cv.yml @@ -0,0 +1,76 @@ +name: Build CV + +on: + push: + branches: + - main + paths: + - 'documents/JRM_CV.tex' + - 'scripts/build_cv.py' + - 'scripts/extract_cv.py' + - 'css/cv.css' + - '.github/workflows/build-cv.yml' + pull_request: + branches: + - main + paths: + - 'documents/JRM_CV.tex' + - 'scripts/build_cv.py' + - 'scripts/extract_cv.py' + - 'css/cv.css' + - '.github/workflows/build-cv.yml' + workflow_dispatch: # Allow manual triggering + +jobs: + build-cv: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Install TeX Live + run: | + sudo apt-get update + sudo apt-get install -y texlive-xetex texlive-fonts-extra texlive-latex-extra + + - name: Install Dartmouth Ruzicka font + run: | + mkdir -p ~/.fonts + cp data/DartmouthRuzicka-*.ttf ~/.fonts/ + fc-cache -fv + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + cache: 'pip' + cache-dependency-path: 'requirements-build.txt' + + - name: Install Python dependencies + run: pip install -r requirements-build.txt + + - name: Build CV (PDF and HTML) + working-directory: scripts + run: python build_cv.py + + - name: Run CV tests + run: python -m pytest tests/test_build_cv.py -v + + - name: Check for changes + id: check_changes + run: | + if [[ -n $(git status --porcelain documents/JRM_CV.pdf documents/JRM_CV.html) ]]; then + echo "changes=true" >> $GITHUB_OUTPUT + else + echo "changes=false" >> $GITHUB_OUTPUT + fi + + - name: Commit and push changes + if: github.event_name == 'push' && steps.check_changes.outputs.changes == 'true' + run: | + git config user.name 'github-actions[bot]' + git config user.email 'github-actions[bot]@users.noreply.github.com' + git add documents/JRM_CV.pdf documents/JRM_CV.html + git diff --staged --quiet || git commit -m 
"Auto-build: Update CV PDF and HTML from LaTeX source" + git push diff --git a/css/cv.css b/css/cv.css new file mode 100644 index 0000000..a6166f0 --- /dev/null +++ b/css/cv.css @@ -0,0 +1,624 @@ +/* ========================================================================== + CV Stylesheet + ========================================================================== */ + +/* Font Face Declarations + ========================================================================== */ + +@font-face { + font-family: 'Dartmouth Ruzicka'; + src: url('../data/DartmouthRuzicka-Regular.ttf') format('truetype'); + font-weight: normal; + font-style: normal; + font-display: swap; +} + +@font-face { + font-family: 'Dartmouth Ruzicka'; + src: url('../data/DartmouthRuzicka-Bold.ttf') format('truetype'); + font-weight: bold; + font-style: normal; + font-display: swap; +} + +@font-face { + font-family: 'Dartmouth Ruzicka'; + src: url('../data/DartmouthRuzicka-RegularItalic.ttf') format('truetype'); + font-weight: normal; + font-style: italic; + font-display: swap; +} + +@font-face { + font-family: 'Dartmouth Ruzicka'; + src: url('../data/DartmouthRuzicka-BoldItalic.ttf') format('truetype'); + font-weight: bold; + font-style: italic; + font-display: swap; +} + +/* CSS Variables + ========================================================================== */ + +:root { + --primary-green: rgb(0, 105, 62); + --bg-green: rgba(0, 105, 62, 0.2); + --dark-text: rgba(0, 0, 0, 0.7); + --light-gray: #f5f5f5; + --border-gray: #e0e0e0; + --max-width: 900px; + --spacing-unit: 1rem; +} + +/* Base Styles + ========================================================================== */ + +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} + +body { + font-family: 'Dartmouth Ruzicka', Georgia, serif; + font-size: 11pt; + line-height: 1.35; + color: var(--dark-text); + background-color: white; + padding-top: 60px; /* Space for sticky download bar */ +} + +/* Download Bar + 
========================================================================== */ + +.cv-download-bar { + position: fixed; + top: 0; + left: 0; + right: 0; + background-color: var(--primary-green); + color: white; + padding: 0.75rem 2rem; + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.15); + z-index: 1000; + display: flex; + justify-content: space-between; + align-items: center; +} + +.cv-download-bar .bar-content { + max-width: var(--max-width); + width: 100%; + margin: 0 auto; + display: flex; + justify-content: space-between; + align-items: center; +} + +.cv-download-bar .cv-title { + font-size: 1.1rem; + font-weight: bold; +} + +.cv-download-bar .download-btn { + background-color: white; + color: var(--primary-green); + border: none; + padding: 0.5rem 1.5rem; + font-family: 'Dartmouth Ruzicka', Georgia, serif; + font-size: 0.95rem; + font-weight: bold; + border-radius: 4px; + cursor: pointer; + text-decoration: none; + display: inline-block; + transition: all 0.3s ease; +} + +.cv-download-bar .download-btn:hover { + background-color: var(--bg-green); + color: white; + transform: translateY(-2px); + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2); +} + +/* Main Content Container + ========================================================================== */ + +.cv-content { + max-width: var(--max-width); + margin: 2rem auto; + padding: 2rem; + background-color: white; + box-shadow: 0 0 20px rgba(0, 0, 0, 0.1); +} + +/* CV Header + ========================================================================== */ + +.cv-header { + text-align: left; + margin-bottom: 1.5rem; + padding-bottom: 0; + border-bottom: none; +} + +.cv-header h1 { + font-size: 17pt; + font-weight: normal; + color: var(--dark-text); + margin-bottom: 0; + letter-spacing: 0; +} + +.cv-header .contact-info { + font-size: 11pt; + line-height: 1.3; + color: var(--dark-text); +} + +.cv-header .contact-info p { + margin: 0; +} + +.cv-header .contact-info p.space-after { + margin-bottom: 0.5rem; +} + +/* Headings + 
========================================================================== */ + +h2 { + font-size: 1.3rem; + font-weight: normal; + color: var(--dark-text); + margin-top: 2.5rem; + margin-bottom: 0.75rem; + padding-bottom: 0; + border-bottom: none; + text-transform: none; + letter-spacing: 0; +} + +h3 { + font-size: 1.05rem; + font-weight: normal; + color: var(--dark-text); + margin-top: 1.5rem; + margin-bottom: 0.5rem; +} + +h4 { + font-size: 1rem; + font-weight: normal; + font-style: italic; + color: var(--dark-text); + margin-top: 0.75rem; + margin-bottom: 0.25rem; +} + +/* Lists + ========================================================================== */ + +ul, ol { + margin-left: 2rem; + margin-bottom: 1rem; +} + +li { + margin-bottom: 0.5rem; + line-height: 1.35; +} + +/* Reverse-numbered lists for publications and awards */ +/* Use native browser support for reversed lists */ +ol[reversed] { + list-style-position: outside; + padding-left: 2.5rem; +} + +ol[reversed] > li { + padding-left: 0.5rem; +} + +ol[reversed] > li::marker { + font-weight: normal; + color: var(--dark-text); +} + +/* Paragraphs + ========================================================================== */ + +p { + margin-bottom: 0.25rem; + margin-top: 0; + text-align: justify; +} + +/* Reduce spacing for paragraphs in section content (like Advisor/Dissertation lines) */ +section p { + margin-bottom: 0.1rem; +} + +/* Section notes (from footnotes) */ +.section-note { + font-size: 0.9rem; + margin-bottom: 0.75rem; + color: var(--dark-text); +} + +/* Links + ========================================================================== */ + +a { + color: var(--primary-green); + text-decoration: none; + transition: all 0.2s ease; +} + +a:hover { + color: var(--dark-text); + text-decoration: underline; +} + +/* Text Styles + ========================================================================== */ + +.small-caps { + font-variant: small-caps; + letter-spacing: 0.5px; +} + +.underline { + 
text-decoration: underline; +} + +em, i { + font-style: italic; +} + +strong, b { + font-weight: bold; +} + +.block-spacer { + display: block; + height: 0.4rem; +} + +/* Two-Column Lists + ========================================================================== */ + +.two-column-list { + column-count: 2; + column-gap: 2rem; + margin-bottom: 1.5rem; +} + +.two-column-list li { + break-inside: avoid; + page-break-inside: avoid; +} + +/* Special Sections + ========================================================================== */ + +.employment-entry, +.education-entry, +.award-entry { + margin-bottom: 1.5rem; +} + +.employment-entry .position { + font-weight: bold; + color: var(--dark-text); +} + +.employment-entry .institution { + font-style: italic; + margin-left: 0.5rem; +} + +.employment-entry .dates { + color: var(--primary-green); + font-weight: bold; +} + +.education-entry .degree { + font-weight: bold; +} + +.education-entry .institution { + font-style: italic; +} + +/* Publications + ========================================================================== */ + +.publication { + margin-bottom: 1rem; + text-align: justify; +} + +.publication .authors { + font-weight: normal; +} + +.publication .title { + font-weight: bold; +} + +.publication .journal { + font-style: italic; +} + +.publication .year { + color: var(--primary-green); +} + +/* CV Footer + ========================================================================== */ + +.cv-footer { + margin-top: 4rem; + padding-top: 2rem; + border-top: 2px solid var(--primary-green); + text-align: center; + font-size: 0.9rem; + color: var(--dark-text); + font-style: italic; +} + +/* Responsive Design - Tablet + ========================================================================== */ + +@media screen and (max-width: 768px) { + body { + padding-top: 80px; + } + + .cv-download-bar { + padding: 0.75rem 1rem; + } + + .cv-download-bar .bar-content { + flex-direction: column; + gap: 0.5rem; + } + + 
.cv-download-bar .cv-title { + font-size: 1rem; + } + + .cv-content { + margin: 1rem; + padding: 1.5rem; + } + + .cv-header h1 { + font-size: 2rem; + } + + h2 { + font-size: 1.2rem; + } + + h3 { + font-size: 1rem; + } + + .two-column-list { + column-count: 1; + } + + ul, ol { + margin-left: 1.5rem; + } + + ol[reversed] > li { + padding-left: 2rem; + } +} + +/* Responsive Design - Mobile + ========================================================================== */ + +@media screen and (max-width: 480px) { + body { + font-size: 10pt; + padding-top: 90px; + } + + .cv-download-bar { + padding: 0.5rem; + } + + .cv-content { + margin: 0.5rem; + padding: 1rem; + box-shadow: none; + } + + .cv-header { + margin-bottom: 2rem; + padding-bottom: 1.5rem; + } + + .cv-header h1 { + font-size: 1.75rem; + } + + .cv-header .contact-info { + font-size: 0.85rem; + } + + h2 { + font-size: 1.1rem; + margin-top: 1.5rem; + } + + h3 { + font-size: 0.95rem; + } + + ul, ol { + margin-left: 1rem; + } + + ol[reversed] > li { + padding-left: 1.5rem; + } + + ol[reversed] > li::before { + min-width: 1.5rem; + } + + .cv-footer { + margin-top: 3rem; + padding-top: 1.5rem; + font-size: 0.85rem; + } +} + +/* Print Styles + ========================================================================== */ + +@media print { + @page { + margin: 0.75in; + size: letter; + } + + body { + font-size: 10pt; + padding-top: 0; + background: white; + } + + .cv-download-bar { + display: none; + } + + .cv-content { + max-width: 100%; + margin: 0; + padding: 0; + box-shadow: none; + } + + .cv-header { + page-break-after: avoid; + border-bottom: 2px solid var(--primary-green); + } + + .cv-header h1 { + font-size: 20pt; + color: var(--primary-green); + } + + h2 { + font-size: 13pt; + page-break-after: avoid; + border-bottom: 1px solid var(--border-gray); + color: var(--primary-green); + } + + h3 { + font-size: 11pt; + page-break-after: avoid; + } + + h4 { + font-size: 10pt; + page-break-after: avoid; + } + + 
.employment-entry, + .education-entry, + .award-entry, + .publication { + page-break-inside: avoid; + } + + li { + page-break-inside: avoid; + } + + .two-column-list { + column-count: 2; + column-gap: 1.5rem; + } + + a { + color: var(--primary-green); + text-decoration: none; + } + + a[href^="http"]:after { + content: ""; + } + + .cv-footer { + margin-top: 2rem; + padding-top: 1rem; + border-top: 1px solid var(--border-gray); + page-break-before: avoid; + } + + /* Ensure proper page breaks */ + section { + page-break-inside: avoid; + } + + /* Orphan and widow control */ + p, li { + orphans: 3; + widows: 3; + } + + /* Color adjustments for printing */ + * { + color-adjust: exact; + -webkit-print-color-adjust: exact; + print-color-adjust: exact; + } +} + +/* Print optimization for publication lists */ +@media print { + ol[reversed] > li::before { + color: black; + } +} + +/* Utility Classes + ========================================================================== */ + +.no-break { + page-break-inside: avoid; + break-inside: avoid; +} + +.text-center { + text-align: center; +} + +.text-right { + text-align: right; +} + +.text-justify { + text-align: justify; +} + +.mb-0 { margin-bottom: 0; } +.mb-1 { margin-bottom: 0.5rem; } +.mb-2 { margin-bottom: 1rem; } +.mb-3 { margin-bottom: 1.5rem; } +.mb-4 { margin-bottom: 2rem; } + +.mt-0 { margin-top: 0; } +.mt-1 { margin-top: 0.5rem; } +.mt-2 { margin-top: 1rem; } +.mt-3 { margin-top: 1.5rem; } +.mt-4 { margin-top: 2rem; } diff --git a/data/people.xlsx b/data/people.xlsx index 2402916..49d2827 100644 Binary files a/data/people.xlsx and b/data/people.xlsx differ diff --git a/documents/JRM_CV.html b/documents/JRM_CV.html new file mode 100644 index 0000000..ddfceb4 --- /dev/null +++ b/documents/JRM_CV.html @@ -0,0 +1,723 @@ + + + + + + Jeremy R. Manning, Ph.D. - Curriculum Vitae + + + +
+
+ Curriculum Vitae + Download PDF +
+
+ +
+
+

Jeremy R. Manning, Ph.D.

+
+

Director, Contextual Dynamics Laboratory

+

Department of Psychological and Brain Sciences

+

Dartmouth College

+

HB 6207, Moore Hall

+

Hanover, NH 03755

+

U.S.A.

+

Email: jeremy.r.manning@dartmouth.edu

+

Phone: 603.646.2777

+

URL: +http://www.context-lab.com

+
+
+ +
+

Employment

+ Associate Professor, +Dartmouth College, Hanover, NH (2024 – )
+

Department of Psychological and Brain Sciences

+

Additional affiliation: Cognitive Science

+

Tenured: 2024

+Assistant Professor, +Dartmouth College, Hanover, NH (2015 – 2024)
+

Department of Psychological and Brain Sciences

+

Additional affiliation: Cognitive Science

+

Reappointed: 2018

+Postdoctoral Research + Associate, Princeton University, Princeton, NJ (2011 – 2015)
+

Princeton Neuroscience +Institute and Department of Computer Science

+

Advisors: Kenneth Norman, Ph.D. and David Blei, Ph.D.

+ +
+ +
+

Education

+ Ph.D. in Neuroscience, University of Pennsylvania, +Philadelphia, PA (2011)
+

Advisor: Michael Kahana, Ph.D.

+

Dissertation: Acquisition, storage, and + retrieval in digital and biological brains

+B.S. in Neuroscience (High honors, Magna cum laude), Brandeis University, +Waltham, MA (2006)
+

Advisor: Robert Sekuler, Ph.D.

+

Dissertation: Modeling human spatial navigation using a + degraded ideal navigator

+B.S. in Computer Science (Magna cum + laude), Brandeis University, +Waltham, MA (2006) + +
+ +
+

Grants, honors, and awards (selected)

+
    +
  1. Linda B. and Kendrick R. Wilson III 1969 Fellowship (2024–2025)
  2. +
  3. John M. Manley Huntington Award for Newly Tenured Faculty (2024)
    + + Awarded to recently tenured Dartmouth faculty members who have an outstanding record of teaching and research
  4. +
  5. CompX Faculty Grant (2024): Developing the next generation of multi-scale large language + models
    + + Award amount: $15,000; Role: PI
  6. +
  7. NSF CAREER Award (2022): Mapping and enhancing the acquisition +of conceptual knowledge using behavior, neural signals, and natural +language processing models
    + +Award amount: $881,612; Role: PI
  8. +
  9. Elected member, Memory Disorders Research Society (2021)
  10. +
  11. NIMH Grant (2021): Serotonin modulation of the development of +neural circuits underlying reward processing and impulsivity in +adolescents
    + +Award amount: $568,974; Role: Co-I (PI: Katherine Nautiyal)
  12. +
  13. NIH Grant Supplement (2019): Dissecting serotonergic and +dopaminergic contributions to the neural circuits underlying impulsive +behavior
    + +Award amount: $93,190; Role: Co-I (PI: Katherine Nautiyal)
  14. +
  15. National Institute on Drug Abuse Center for Technology and Behavioral Health Pilot Grant (2019): Linking +mental health and exercise via remote sensing
    + +Award amount: $20,000; Role: Co-PI (PI: David Bucci; Co-PI: Lorie +Loeb)
  16. +
  17. Dartmouth Junior Faculty Fellowship (2018)
  18. +
  19. Walter and Constance Burke Research Initiation Award (2018)
    + +Award amount: $25,000; Role: PI
  20. +
  21. DARPA Grant: Memory Enhancement with Modeling (MEM; 2018)
    + +Award amount: $55,558; Role: PI (sub-award of DARPA RAM +N66001-14-2-4-032)
  22. +
  23. i-CORPS Pilot Grant: Developing a mobile device for estimating +dynamic attention states (2018).
    +Award amount: $3,000; Role: Co-PI (PI: Peter Tse)
  24. +
  25. Diamond Research Development Award (2017): Improving memory and context reinstatement at perceptual event boundaries
    + +Award amount: $199,997; Role: Co-PI (PI: Barbara Jobst)
  26. +
  27. Dartmouth Leslie Center for the Humanities award for developing a +course incorporating the theme of "revolution" (2017; for +Storytelling with Data; PSYC 81.06).
    +Award amount: $5,000; +Role: Course Instructor
  28. +
  29. Social Impact Practicum (2017; for Storytelling with Data; PSYC + 81.06)
    +Award amount: $2,000; Role: Course Instructor
  30. +
  31. Young Minds and Brains (2017): The impact of exercise on attention, memory, +and stress
    + +Award amount: $100,000; Role: PI (with David Bucci, Co-PI)
  32. +
  33. NSF EPSCoR Grant (2016): The neural basis of attention
    +Award +amount: $6,000,000; Role: Co-I (PI: Peter Tse)
  34. +
  35. NIMH Ruth L. Kirshstein National Research Service Award for an + Individual Predoctoral Fellowship (2010): The neural representation of + context and its role in free recall
    +Award amount: $57,762; Role: + PI
  36. +
  37. NIH Computational Neuroscience Training Grant (2008)
    + +Role: Trainee
  38. +
  39. NIH Systems and Integrative Biology Training +Grant (2006)
    +Role: Trainee
  40. +
+ +
+ +
+

Publications

+

Undergraduate trainees are denoted by + underlined text, graduate trainees are indicated by italicized text, and postdoctoral trainees are indicated by underlined and italicized text.

+
    +
  1. Fitzpatrick PC, Heusser AC, Manning JR (2025) Text embedding models yield +high-resolution insights into conceptual knowledge from short multiple-choice +quizzes. Nature Communications: In press.
  2. +
  3. Stropkay HF, Chen J, Latifi MJ, Rockmore DN, Manning JR (2025) A stylometric application of large language models. arXiv: 2510.21958.
  4. +
  5. Manning JR (2025) Why we're so preoccupied by the past. Scientific American, online.
  6. +
  7. Owen LLW, Manning JR (2024) High-level cognition is supported by +information-rich but compressible brain activity +patterns. Proceedings of the National Academy of Sciences, USA, 121(35): e2400082121.
  8. +
  9. Xu X, Zhu Z, Zheng X, Manning JR (2024) Temporal +asymmetries in inferring unobserved past and future events. Nature +Communications, 15: 8502.
  10. +
  11. Jolly E, Sadhukha S, Iqbal M, Molani Z, Walsh T, Manning JR, + Chang LJ (2023) People are represented and remembered through their + relationships with others. PsyArXiv: bw9r2.
  12. +
  13. Ziman K, Lee MR, Martinez AR, Manning JR (2023) Category-based +and location-based volitional covert attention are mediated by +different mechanisms and affect memory at different timescales. +PsyArXiv: 2ps6e.
  14. +
  15. Manning JR, Whitaker EC, Fitzpatrick PC, Lee MR, Frantz AM, +Bollinger BJ, Romanova D, Field CE, Heusser AC (2023) Feature and order +manipulations in a free recall task affect memory for current and future lists. +PsyArXiv: erzfp. (Under second round of reviews at Psychological Review)
  16. +
  17. Fitzpatrick PC, Manning JR (2023) +davos: a Python package "smuggler" for constructing +lightweight reproducible notebooks. SoftwareX: in press.
  18. +
  19. Manning JR (2023) Context reinstatement. In Kahana MJ and +Wagner AD, Ed. Handbook of Human Memory. New York, NY: +Oxford University Press. Chapter 38.
  20. +
  21. Manning JR (2023) Identifying +stimulus-driven neural activity patterns in multi-patient intracranial recordings. +In Axmacher N, Ed. Intracranial EEG for Cognitive + Neuroscience. New York, NY: Springer. Chapter 48.
  22. +
  23. Manning JR, Notaro GM, Chen E, Fitzpatrick PC +(2022) Fitness tracking reveals task-specific associations between +memory, mental health, and physical activity. Scientific + Reports, 12: 13822.
  24. +
  25. Kumar M, Anderson MJ, Antony JW, Baldassano C, Brooks PP, Cai MB, +Chen P-HC, Ellis CT, Henselman-Petrusek G, Huberdeau D, Hutchinson BJ, +Li PY, Lu Q, Manning JR, Mennen AC, Nastase SA, Richard H, +Schapiro AC, Schuck NW, Suo D, Turek JS, Vo VA, Wallace G, Wang Y, +Zhang H, Zhu X, Capotă M, Cohen JD, Hasson U, Li K, Ramadge PJ, +Turk-Browne NB, Willke TL, Norman KA (2022) BrainIAK: the brain +imaging analysis kit. Aperture, 1(4): 1–19.
  26. +
  27. Scangos KW, Khambhati AN, Daly PM, Owen LLW, +Manning JR, Ambrose JB, Austin E, Dawes HE, Krystal AD, Chang +EG (2021) Distributed subnetworks of depression defined by direct +intracranial neurophysiology. Frontiers in Human + Neuroscience, 15: doi.org/10.3389/fnhum.2021.746499.
  28. +
  29. Chen HT, Manning JR, van der Meer MAA (2021) +Between-subject prediction reveals a shared representational geometry +in the rodent hippocampus. Current Biology, 31: +1–12.
  30. +
  31. Owen LLW, Chang TH, Manning JR (2021) High-level +cognition during story listening is reflected in high-order dynamic +correlations in neural activity patterns. Nature + Communications, 12(5728): doi.org/10.1038/s41467-021-25876-x.
  32. +
  33. Manning JR (2021) Episodic memory: mental time travel or a +quantum 'memory wave' function? Psychological Review, 128(4): +711–725.
  34. +
  35. Chang LJ, Jolly E, Cheong JH, +Rapuano K, Greenstein N, Chen PHA, Manning JR (2021) +Endogenous variation in ventromedial prefrontal cortex state dynamics +during naturalistic viewing reflects affective +experience. Science Advances, 7(17): eabf7129.
  36. +
  37. Xie T, Cheong JH, Manning JR, Brandt AM, Aronson +JP, Jobst BC, Bujarski KA, Chang LJ (2021) Minimal functional +alignment of ventromedial prefrontal cortex intracranial EEG signals +during naturalistic viewing. bioRxiv: 443308.
  38. +
  39. Ziman K, Manning JR (2021) Unexpected false feelings of +familiarity about faces are associated with increased pupil +dilations. bioRxiv: 432360.
  40. +
  41. Heusser AC, Fitzpatrick PC, Manning JR (2021) Geometric models reveal +behavioral and neural signatures of transforming naturalistic experiences into +episodic memories. Nature Human Behaviour: +doi.org/10.1038/s41562-021-01051.
  42. +
  43. Owen LLW, Muntianu TA, Heusser AC, Daly P, Scangos K, +Manning JR (2020) A Gaussian process model of human +electrocorticographic data. <em>Cerebral Cortex</em>, 30(10): +5333–5345.
  44. +
  45. Chang L, Manning JR, Baldassano C, de la Vega A, Fleetwood G, +Geerligs L, Haxby J, Lahnakoski J, Parkinson C, Shappell H, Shim WM, +Wager T, Yarkoni T, Yeshurun Y, Finn E (2020) Naturalistic data +analysis: doi.org/10.5281/zenodo.3937849.
  46. +
  47. Heusser AC, Ziman K, Owen LLW, Manning JR (2018) +HyperTools: a Python toolbox for gaining geometric insights into +high-dimensional data. <em>Journal of Machine Learning Research</em>, 18: +1–6.
  48. +
  49. Ziman K, Heusser AC, Fitzpatrick PC, Field CE, Manning JR (2018) Is +automatic speech-to-text transcription ready for use in psychological +experiments? <em>Behavior Research Methods</em>: +doi.org/10.3758/s13428-018-1037-4.
  50. +
  51. Heusser AC, Manning JR (2018) Capturing the geometric structure +of episodic memories for naturalistic experiences. Conference on +Cognitive Computational Neuroscience: +doi.org/10.32470/CCN.2018.1267-0.
  52. +
  53. Manning JR, Zhu X, Willke TL, Ranganath R, Stachenfeld K, Hassan U, +Blei DM, Norman KA (2018) A probabilistic approach to discovering dynamic +full-brain functional connectivity patterns. <em>NeuroImage</em>, 180: +243–252.
  54. +
  55. Heusser AC, Fitzpatrick PC, Field CE, Ziman K, Manning JR +(2017) Quail: a Python toolbox for analyzing and plotting free recall data. +<em>The Journal of Open Source Software</em>, 2(18): 424.
  56. +
  57. Manning JR, Hulbert JC, Williams J, Piloto L, Sahakyan L, +Norman KA (2016) A neural signature of contextually mediated intentional +forgetting. <em>Psychonomic Bulletin and Review</em>, 23(5): 1534–1542.
  58. +
  59. Anderson MJ, Capota M, Turek JS, Zhu X, Willke TL, Wang Y, Chen P-H, +Manning JR, Ramadge PJ, Norman KA (2016) Enabling factor analysis on +thousand-subject neuroimaging datasets. IEEE Xplore, International +Conference on Big Data (BigData 2016): +doi.org/10.1109/BigData.2016.7840719.
  60. +
  61. Benson NC, Manning JR, Brainard DH (2014) Unsupervised +learning of cone spectral classes from natural images. <em>PLoS Computational +Biology</em>, 10(6): e1003652.
  62. +
  63. Manning JR, Ranganath R, Norman KA, Blei DM (2014) Topographic factor +analysis: a Bayesian model for inferring brain networks from neural data. <em> +PLoS One</em>, 9(5): e94914.
  64. +
  65. Manning JR, Lew TF, Li N, Kahana MJ, Sekuler RW (2014) +MAGELLAN: a cognitive map-based model of human wayfinding. <em> +Journal of Experimental Psychology: General</em>, 143(3): 1314–1330.
  66. +
  67. Manning JR, Ranganath R, Keung W, Turk-Browne N, Cohen +JD, Norman KA, Blei DM (2014) Hierarchical Topographic Factor Analysis. +IEEE Xplore, 4th International Workshop on Pattern +Recognition in Neuroimaging: doi.org/10.1109/PRNI.2014.6858530.
  68. +
  69. Manning JR, Kahana MJ, Norman KA (2014) The role of +context in memory. In Gazzaniga M, Ed. The Cognitive Neurosciences, +Fifth Edition. Cambridge, MA: MIT Press. Chapter 47.
  70. +
  71. Manning JR, Kahana MJ (2012) Interpreting semantic +clustering effects in free recall. Memory, 20(5): 511–517.
  72. +
  73. Manning JR, Sperling MR, Sharan A, Rosenberg EA, Kahana MJ (2012) +Spontaneously reactivated patterns in frontal and temporal lobe predict +semantic clustering during memory search. The Journal of Neuroscience, +32(26): 8800–8816.
  74. +
  75. Manning JR, Gershman SJ, Norman KA, Blei DM (2012) Factor +topographic latent source analysis: factor analysis for brain images. +Neural Information Processing Systems (NeurIPS) Workshop on Machine +Learning and Interpretation in Neuroimaging, 2: Online.
  76. +
  77. Manning JR, Polyn SM, Baltuch G, Litt B, Kahana MJ (2011) +Oscillatory patterns in temporal lobe reveal context reinstatement during +memory search. Proceedings of the National Academy of Sciences of the +United States of America, 108(31): 12893–12897.
  78. +
  79. Jacobs J, Manning JR, Kahana MJ (2010) Response to +Miller: "broadband" vs. "high gamma" electrocorticographic signals. +The Journal of Neuroscience, 30(19): Online.
  80. +
  81. Manning JR, Jacobs J, Fried I, Kahana MJ (2009) Broadband +shifts in local field potential power spectra are correlated with single-neuron +spiking in humans. The Journal of Neuroscience, 29(43): 13613–13620.
  82. +
  83. Manning JR, Brainard DH (2009) Optimal design of photoreceptor +mosaics: why we do not see color at night. Visual Neuroscience, 26: +5–19.
  84. +
+ +
+ +
+

Invited talks (selected)

+
    +
  1. Generative Episodic Memory: Constructing Scenarios of the Past (Keynote Speaker, 2025)
  2. +
  3. Brandeis University (2025)
  4. +
  5. University of Virginia (2024)
  6. +
  7. University of Pennsylvania (2023)
  8. +
  9. Cornell University (2023)
  10. +
  11. Boston University (2023)
  12. +
  13. Harvard University (2022)
  14. +
  15. University of California, Irvine (2022)
  16. +
  17. Ruhr Universität Bochum (2022)
  18. +
  19. Microsoft Research (2022)
  20. +
  21. Carnegie Mellon University (2021)
  22. +
  23. National Institutes of Mental Health (2021)
  24. +
  25. Boston College (2020)
  26. +
  27. Facebook Reality Labs (2020)
  28. +
  29. University of California, Berkeley (2020)
  30. +
  31. University of Oregon (2020)
  32. +
  33. Context and Episodic Memory Symposium (2019)
  34. +
  35. Society for Affective Science (2019)
  36. +
  37. Uber (2019)
  38. +
  39. Northeastern University (2018)
  40. +
  41. Society for Neuroscience (2018)
  42. +
  43. University of Pennsylvania (2018)
  44. +
  45. Bard College (2017)
  46. +
  47. Harvard University (2017)
  48. +
  49. University of Texas at Austin (2017)
  50. +
  51. Society for Neuroscience (2016)
  52. +
  53. Brown University (2015)
  54. +
  55. Columbia University (2015)
  56. +
  57. Dartmouth College (2015)
  58. +
  59. Georgetown University (2015)
  60. +
  61. Johns Hopkins University (2015)
  62. +
  63. Context and Episodic Memory Symposium (2014)
  64. +
  65. Manhattan Area Memory Meeting (2014)
  66. +
  67. Pattern Recognition in Neuroimaging (2014)
  68. +
  69. Context and Episodic Memory Symposium (2013)
  70. +
  71. University of Massachusetts, Amherst (2013)
  72. +
  73. Dartmouth College (2013)
  74. +
  75. Charles River Analytics (2012)
  76. +
  77. Natick Soldier Systems Center (2012)
  78. +
  79. Princeton University (2011)
  80. +
  81. Society for Mathematical Psychology (2011)
  82. +
  83. University of Pennsylvania (2011)
  84. +
+
+
+ +
+

Software (selected)

+
    +
  1. Manning JR, Manjunatha H, Kording K (2023) Chatify: add an LLM-based chatbot "tutor" to +Jupyter notebooks. GitHub.
  2. +
  3. Fitzpatrick PC, Manning JR (2022) Davos: import +Python packages, even if they aren't installed. GitHub.
  4. +
  5. Manning JR (2021) DataWrangler: format and clean +data, with a special focus on applying natural language processing +models to text data. GitHub.
  6. +
  7. Owen LLW, Chang TH, Manning JR (2019) Timecorr +Toolbox: compute high-order correlations in multivariate timeseries +data. GitHub.
  8. +
  9. Owen LLW, Heusser AC, Manning JR (2018) SuperEEG Toolbox: +infer full-brain activity patterns from a small(ish) number of ECoG electrodes using Gaussian process regression. GitHub.
  10. +
  11. Capota M, Turek J, Chen P-HC, Zhu X, Manning JR, Sundaram N, +Keller B, Wang Y, Shin YS (2017) BrainIAK: Brain Imaging Analysis Kit. brainiak.org.
  12. +
  13. Heusser AC, Ziman K, Fitzpatrick PC, Field CE, Manning JR +(2017) AutoFR: a scalable verbal free recall experiment with +automatic speech-to-text transcription. +GitHub.
  14. +
  15. Heusser AC, Fitzpatrick PC, Field CE, Ziman K, Manning JR +(2017) Quail: a Python toolbox for analyzing and plotting free recall +data. GitHub.
  16. +
  17. Heusser AC, Ziman K, Owen LLW, Manning JR (2017) +HyperTools: gain geometric insights into high-dimensional data (Python). GitHub.
  18. +
  19. Manning JR (2016) Hyperplot Tools: gain +geometric insights into high-dimensional data (MATLAB). + MATLAB + Central File Exchange: 56623.
  20. +
  21. Manning JR (2014) Hierarchical Topographic +Factor Analysis: + efficiently identify functional brain networks in fMRI data.
  22. +
  23. Manning JR (2013) MATLAB Ipsum: generate filler text using MATLAB. MATLAB + Central File Exchange: 43428.
  24. +
  25. Manning JR (2013) Easy resample: simple interface for + interpolating or resampling a 1d signal. MATLAB + Central File Exchange: 43320.
  26. +
  27. Manning JR (2012) Chuck Close-ify: automatically create + artwork in Chuck Close's iconic style based on existing photographs. MATLAB + Central File Exchange: 38770.
  28. +
  29. Manning JR (2012) Plot fMRI images: quick and easy method + for generating 2d and 3d brain plots. MATLAB Central File + Exchange: 36139.
  30. +
  31. Manning JR (2012) Generate synthetic fMRI data: generate + synthetic data for testing fMRI analyses and models. MATLAB Central File + Exchange: 36125.
  32. +
  33. Manning JR (2012) Sane pColor: create 2d images that + don't look blurry in OS X's Preview PDF viewer. MATLAB Central File + Exchange: 35601.
  34. +
  35. Manning JR (2012) Attach: MATLAB implementation of the attach + function in R. MATLAB Central File + Exchange: 35436.
  36. +
  37. Manning JR (2012) Get tight subplot handles: allows user to + exert finer control over subplot spacing in MATLAB. MATLAB Central File + Exchange: 35435.
  38. +
  39. Manning JR (2012) Slices: efficiently slice a tensor + along the nth dimension. MATLAB Central File + Exchange: 35439.
  40. +
+ +
+ +
+

Teaching and instruction

+
+

Open courses (selected)

+
    +
  1. Laboratory in Psychological Science; doi.org/10.5281/zenodo.6596761
  2. +
  3. Human + Memory; doi.org/10.5281/zenodo.5182787
  4. +
  5. Introduction to + Programming for Psychological Scientists; doi.org/10.5281/zenodo.5136795
  6. +
  7. Naturalistic + data analysis; doi.org/10.5281/zenodo.3937849
  8. +
  9. Storytelling + with Data; doi.org/10.5281/zenodo.5182774
  10. +
  11. Methods in Neuroscience at Dartmouth Computational + Summer School
  12. +
  13. Computational Neuroscience; doi.org/10.5281/zenodo.10235877
  14. +
+ +
+
+

Mentorship (selected)

+

Senior thesis students are denoted by asterisks + (*)

+

Postdoctoral Advisees

+
    +
  1. Hung-Tu Chen (2024 – 2025; current position: Meta)
  2. +
  3. Gina Notaro (2017 – 2018; current position: HRL Laboratories)
  4. +
  5. Andrew Heusser (2016 – 2018; current position: PyMC Labs)
  6. +
+

Graduate Advisees

+
    +
  1. Paxton Fitzpatrick (Doctoral student; 2021 – )
  2. +
  3. Xinming Xu (Doctoral student; 2021 – )
  4. +
  5. Mark Taylor (Masters student, Quantitative Biomedical Sciences; 2021)
  6. +
  7. Caroline Lee (Doctoral student; 2019 – 2021)
  8. +
  9. Max Bluestone (Masters student, Quantitative Biomedical +Sciences; 2018 – 2020)
  10. +
  11. Deepanshi Shokeen (Masters student, Quantitative Biomedical +Sciences; 2018 – 2020)
  12. +
  13. Kirsten Ziman (Doctoral student; 2017 – 2022; current position: Postdoctoral researcher at Princeton University)
  14. +
  15. Lucy Owen (Doctoral student; 2016 – 2021; current position: Assistant Professor at University of Montana)
  16. +
  17. Tom Hao Chang (Masters student, Computer Science; co-advised with +Qiang Liu; 2016 – 2017; current position: Robinhood)
  18. +
  19. Hanli Li (Masters student, Computer Science; co-advised with Qiang +Liu; 2016)
  20. +
+

Thesis Committees

+
    +
  1. Jane Han (Advisor: James Haxby)
  2. +
  3. Lindsey Tepfer (Advisor: Mark Thornton)
  4. +
  5. Arati Sharma (Advisor: Kate Nautiyal)
  6. +
  7. Clara Sava-Segal (Advisor: Emily Finn)
  8. +
  9. Manish Mohapatra (Advisor: Matthijs van der Meer; Graduated 2025)
  10. +
  11. Megan Hillis (Advisor: David Kraemer; Graduated 2025)
  12. +
  13. Omri Raccah (Advisor: David Poeppel; Graduated 2024)
  14. +
  15. Courtney Jiminez (Advisor: Meghan Meyer; Graduated 2024)
  16. +
  17. Hung-tu Chen (Advisor: Matthijs van der Meer; Graduated 2024)
  18. +
  19. Dhaval Bhatt (Advisor: Meghan Meyer; Graduated 2023)
  20. +
  21. Tiankang Xie (Advisor: Luke Chang; Graduated 2023)
  22. +
  23. Vassiki Chauhan (Advisors: Ida Gobbini and James Haxby; Graduated 2021)
  24. +
  25. Emily Irvine (Advisor: Matthijs van der Meer; Graduated 2020)
  26. +
  27. Eli Bowen (Advisor: Richard Granger; Graduated 2020)
  28. +
  29. Eshin Jolly (Advisor: Luke Chang; Graduated 2020)
  30. +
  31. Stephen Meisenhelter (Advisor: Barbara Jobst; Graduated 2020)
  32. +
  33. Feilong Ma (Advisor: James Haxby; Graduated 2019)
  34. +
  35. Kevin Hartstein (Advisor: Peter Tse; Graduated 2019)
  36. +
  37. Beau Sievers (Advisor: Thalia Wheatley; Graduated 2018)
  38. +
  39. Kristina Rapuano (Advisor: Luke Chang; Graduated 2018)
  40. +
  41. Luke Eglington (Advisor: Sean Kang; Graduated 2018)
  42. +
  43. Gina Notaro (Advisor: Solomon Diamond; Graduated 2017)
  44. +
+

Specialist Committees

+
    +
  1. Yuqi Zhang (Advisors: Richard Granger and James Haxby)
  2. +
  3. Covert Geary (Advisor: John Murray)
  4. +
  5. Menghan Yang (Advisor: Luke Chang)
  6. +
  7. Deepasri Prasad (Advisor: Caroline Robertson)
  8. +
  9. Zizhuang Miao (Advisor: Tor Wager)
  10. +
  11. Benjamin Graul (Advisor: Tor Wager)
  12. +
  13. Yeongji Lee (Advisor: David Kraemer)
  14. +
  15. Thomas Botch (Advisors: Emily Finn and Caroline Robertson)
  16. +
  17. Dhaval Bhatt (Advisor: Meghan Meyer)
  18. +
  19. Clara Sava-Segal (Advisor: Emily Finn)
  20. +
  21. Wasita Mahaphanit (Advisor: Luke Chang)
  22. +
  23. Jane Han (Advisor: James Haxby)
  24. +
  25. Megan Hillis (Advisor: Caroline Robertson)
  26. +
  27. Anna Mynick (Advisor: Caroline Robertson)
  28. +
  29. Marissa Clark (Advisor: Luke Chang)
  30. +
  31. Robert Quon (Advisor: Barbara Jobst)
  32. +
  33. Mira Nencheva (Advisor: Casey Lew-Williams)
  34. +
  35. Marvin Maechler (Advisor: Peter Tse)
  36. +
  37. Eli Bowen (Advisor: Richard Granger)
  38. +
  39. Emma Templeton (Advisor: Thalia Wheatley)
  40. +
  41. Feilong Ma (Advisor: James Haxby)
  42. +
  43. Youki Tanaka (Advisor: Matthijs van der Meer)
  44. +
+

Undergraduate Advisees

+
    +
  1. Sam Haskel* (2025 – )
  2. +
  3. Harrison Stropkay* (co-advised with Daniel Rockmore; 2025)
  4. +
  5. Kevin Chang (2025 – )
  6. +
  7. Andrew Richardson (2025 – )
  8. +
  9. Ben Hanson (2025 – )
  10. +
  11. Annabelle Morrow (2025 – )
  12. +
  13. Owen Phillips (2025 – )
  14. +
  15. Rodrigo Vega Ayllon (2025 – )
  16. +
  17. Joy Maina (2025 – )
  18. +
  19. Alexandra Wingo (2025 – )
  20. +
  21. Angelyn Liu (2025 – )
  22. +
  23. Miel Wewerka (2024 – )
  24. +
  25. Manraaj Singh (2024 – )
  26. +
  27. Can Kam (2024 – )
  28. +
  29. Chelsea Joe (2024 – )
  30. +
  31. Jacob Bacus (2024 – )
  32. +
  33. Rohan Goyal (2024 – )
  34. +
  35. Harrison Stropkay* (2024 – )
  36. +
  37. Abigayle McCusker (2024 – )
  38. +
  39. Torsha Chakraverty (2024 – )
  40. +
  41. Chloe Terestchenko (2024 – )
  42. +
  43. Ansh Motiani (2024 – )
  44. +
  45. Kaitlyn Peng* (2024 – )
  46. +
  47. Everett Tai (2024 – )
  48. +
  49. Andrew Cao (2024 – )
  50. +
  51. Michael Chen (2023 – )
  52. +
  53. Jake McDermid (2023 – )
  54. +
  55. Om Shah (2023 – )
  56. +
  57. Grady Redding (2023 – )
  58. +
  59. DJ Matusz (2023 – )
  60. +
  61. Sarah Parigela (2023 – )
  62. +
  63. Aaryan Agarwal (2023 – )
  64. +
  65. Maura Hough (2023 – )
  66. +
  67. Emma Reeder (2023 – )
  68. +
  69. Safwan Rashid (2023 – )
  70. +
  71. Francisca Fadairo (2023 – )
  72. +
  73. Ameer Talha Yasser (2023)
  74. +
  75. Yue Zhuo (2023 – )
  76. +
  77. Megan Liu (2023 – 2024)
  78. +
  79. Charles Baker (2023)
  80. +
  81. Andrew Shi (2023)
  82. +
  83. Ash Chinta (2023)
  84. +
  85. Xueyao Zheng (2023)
  86. +
  87. Sergio Campos Legonia (2023)
  88. +
  89. Jennifer Xu (2023 – )
  90. +
  91. Elias Emery (2023)
  92. +
  93. Yvonne Chen (2023)
  94. +
  95. William McCall (2023)
  96. +
  97. Natalie Schreder (2023)
  98. +
  99. Raselas Dessalegn (2023)
  100. +
  101. Grace Wang (2023)
  102. +
  103. Mira Chiruvolu (2023 – 2024)
  104. +
  105. Anna Mikhailova (2022)
  106. +
  107. Ansh Patel (2022 – )
  108. +
  109. Ziyan Zhu (2022 – )
  110. +
  111. Benjamin Lehrburger (2022)
  112. +
  113. Thomas Corrado (2022)
  114. +
  115. Samuel Crombie (2022)
  116. +
  117. Alexander Marcoux (2022)
  118. +
  119. Jessna Brar (2022)
  120. +
  121. Wenhua Liang (2022)
  122. +
  123. Kevin Cao (2022)
  124. +
  125. Goutham Veeramachaneni (2022)
  126. +
  127. Zachary Somma (2022)
  128. +
  129. Dawson Haddox (2022)
  130. +
  131. Swestha Jain (2022)
  132. +
  133. Aidan Adams (2021)
  134. +
  135. Damini Kohli (2021)
  136. +
  137. Kunal Jha* (2021 – )
  138. +
  139. Daniel Carstensen* (2021 – )
  140. +
  141. Brian Chiang (2021 – 2022)
  142. +
  143. Daniel Ha (2021)
  144. +
  145. Darren Gu (2020 – 2021)
  146. +
  147. Tyler Chen (2020 – 2022)
  148. +
  149. Tehut Biru* (2020 – 2021)
  150. +
  151. Chris Suh (2020 – 2021)
  152. +
  153. Helen Liu (2020)
  154. +
  155. Kelly Rutherford (2020)
  156. +
  157. Chris Jun (2020 – 2022)
  158. +
  159. Ethan Adner (2020 – 2022)
  160. +
  161. Chris Long (2020 – 2021)
  162. +
  163. Esme Chen (2020 – 2021)
  164. +
  165. Luca Lit (2020)
  166. +
  167. Vivian Tran (2020)
  168. +
  169. Greg Han (2020)
  170. +
  171. Austin Zhang (2020)
  172. +
  173. Chelsea Uddenberg (2020)
  174. +
  175. Shane Hewitt (2020)
  176. +
  177. Chetan Palvuluri (2020)
  178. +
  179. Aaron Lee (2019 – 2020)
  180. +
  181. Anne George (2019 – 2020)
  182. +
  183. Sarah Park (2019 – 2020)
  184. +
  185. Shane Park (2019 – 2020)
  186. +
  187. William Chen (2019 – 2020)
  188. +
  189. Tudor Muntianu (2019 – 2021)
  190. +
  191. William Baxley (2018 – 2019)
  192. +
  193. Ann Carpenter (2018)
  194. +
  195. Seung Ju Lee (2018)
  196. +
  197. Mustafa Nasir-Moin (2018)
  198. +
  199. Iain Sheerin (2018)
  200. +
  201. Darya Romanova (2018)
  202. +
  203. Alejandro Martinez (2018 – 2020)
  204. +
  205. Rachael Chacko (2018)
  206. +
  207. Kirsten Soh (2018)
  208. +
  209. Paxton Fitzpatrick* (2017 – 2019)
  210. +
  211. Stephen Satterthwaite (2017 – 2018)
  212. +
  213. Bryan Bollinger (2017 – 2018)
  214. +
  215. Christina Lu (2017)
  216. +
  217. Armando Ortiz (2017)
  218. +
  219. Campbell Field (2016 – 2018)
  220. +
  221. Madeline Lee (2016 – 2020)
  222. +
  223. Wei Liang Samuel Ching (2016 – 2017)
  224. +
  225. Marisol Tracy (2016 – 2017)
  226. +
  227. Allison Frantz (2016 – 2017)
  228. +
  229. Aamuktha Porika (2016 – 2017)
  230. +
  231. Jake Rost (2016)
  232. +
  233. Clara Silvanic (2016)
  234. +
  235. Aman Agarwal (2016)
  236. +
  237. Joseph Finkelstein (2016)
  238. +
  239. Sheherzad Mohydin (2016)
  240. +
  241. Peter Tran (2016)
  242. +
  243. Gal Perlman (2016)
  244. +
  245. Jessica Tin (2016)
  246. +
+
+ +
+
+ +
+

Service

+
+

Professional organizations

+
    +
  1. Dartmouth-Kalaniyot (2024 – ) Co-founder, Board member
  2. +
  3. National Science Foundation (2023, 2024, 2025) Panel member
  4. +
  5. NeuroMatch Academy (2021 – ) Developer and project mentor (computational neuroscience and deep learning tracks)
  6. +
  7. Artificial Intelligence and Statistics (AISTATS; 2021 – 2024) Area +chair (natural language processing and machine learning)
  8. +
  9. Methods in Neuroscience at Dartmouth (MIND) Summer School (2017 – ) Co-founder
  10. +
+ +
+
+

Dartmouth committee memberships

+
    +
  1. Social (Department Well-Being) Committee (2023–2024, 2024–2025)
  2. +
  3. Undergraduate Committee (2021–2022, 2015–2016)
  4. +
  5. Graduate Committee (2020–2021, 2016–2019)
  6. +
  7. Cognitive Neuroscience Faculty Search Committee (2018)
  8. +
  9. Molecular and Systems Biology Faculty Search Committee (2017)
  10. +
  11. Cognitive Neuroscience Faculty Search Committee (2016)
  12. +
+ +
+
+

Ad-hoc reviewer

+ Advances in Cognitive Psychology, +Agence Nationale de la Recherche, +American Journal of Psychology, +Cell Reports, +Cerebral Cortex, +Cognition, +Cognition and Emotion, +Cortex, +Computational and Systems Neuroscience (Cosyne), +eLife, +International Conference on Machine Learning (ICML), +International Joint Conference on Artificial Intelligence, +International Journal of Social Research Methodology, +Israel Science Foundation, +Journal of Cognitive Psychology, +Journal of Mathematical Psychology, +National Science Foundation (USA), +Nature, +Nature Communications, +Nature Computational Science, +Nature Human Behaviour, +Neural Computation, +NeuroImage, +Neural Information Processing Systems (NeurIPS), +Neuropsychologia, +PLoS Biology, +PLoS Computational Biology, +Proceedings of the National Academy of Sciences, +Psychological Reports, +Psychological Review, +Psychonomic Bulletin and Review, +Science, +Scientific Data, +Scientific Reports, +Society for Artificial Intelligence and Statistics (AISTATS), +Swiss National Science Foundation, +The Journal of Neuroscience + +
+
+ + +
+ + diff --git a/documents/JRM_CV.pdf b/documents/JRM_CV.pdf index 2265ac6..eaf5e46 100644 Binary files a/documents/JRM_CV.pdf and b/documents/JRM_CV.pdf differ diff --git a/people.html b/people.html index c0c83d5..5ccfb09 100644 --- a/people.html +++ b/people.html @@ -71,7 +71,7 @@

We're a motley crew of science nerds united in the pursuit of understanding

jeremy manning | lab director

Jeremy is an Associate Professor of Psychological and Brain Sciences at Dartmouth and directs the Contextual Dynamics Lab. He enjoys thinking about brains, non-brain brain-related things (e.g. zombies), computers, annoying-to-solve puzzles, and cats.

-

[CV] [Google Scholar]

+

[CV] [Google Scholar]

diff --git a/research.html b/research.html index f0a9ef1..b52690e 100644 --- a/research.html +++ b/research.html @@ -57,7 +57,7 @@
diff --git a/scripts/build_cv.py b/scripts/build_cv.py new file mode 100644 index 0000000..c50ba0e --- /dev/null +++ b/scripts/build_cv.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python3 +""" +Build CV from LaTeX source to PDF and HTML. + +This script: +1. Compiles JRM_CV.tex to PDF using XeLaTeX +2. Converts to HTML using custom LaTeX parser (extract_cv.py) +3. Cleans up temporary LaTeX build files +""" + +import subprocess +import sys +from pathlib import Path + +# Import the custom LaTeX parser +from extract_cv import extract_cv + +# Paths +PROJECT_ROOT = Path(__file__).parent.parent +DOCUMENTS_DIR = PROJECT_ROOT / 'documents' +DATA_DIR = PROJECT_ROOT / 'data' +CSS_DIR = PROJECT_ROOT / 'css' +TEX_FILE = DOCUMENTS_DIR / 'JRM_CV.tex' +PDF_FILE = DOCUMENTS_DIR / 'JRM_CV.pdf' +HTML_FILE = DOCUMENTS_DIR / 'JRM_CV.html' + +# LaTeX temporary file extensions to clean up +LATEX_TEMP_EXTENSIONS = [ + '.aux', '.log', '.out', '.toc', '.lof', '.lot', '.fls', '.fdb_latexmk', + '.synctex.gz', '.bbl', '.blg', '.nav', '.snm', '.vrb', + '.4ct', '.4tc', '.idv', '.lg', '.tmp', '.xdv', '.xref', '.dvi' +] + + +def run_command(cmd: list, cwd: Path = None, timeout: int = 120) -> tuple: + """Run a command and return (success, stdout, stderr).""" + try: + result = subprocess.run( + cmd, + cwd=cwd, + capture_output=True, + text=True, + timeout=timeout + ) + return result.returncode == 0, result.stdout, result.stderr + except subprocess.TimeoutExpired: + return False, '', f'Command timed out after {timeout}s' + except Exception as e: + return False, '', str(e) + + +def compile_pdf() -> bool: + """Compile LaTeX to PDF using XeLaTeX (run twice for references).""" + print(f"Compiling {TEX_FILE.name} to PDF...") + + # Run xelatex twice for references + for i in range(2): + success, stdout, stderr = run_command( + ['xelatex', '-interaction=nonstopmode', TEX_FILE.name], + cwd=DOCUMENTS_DIR, + timeout=120 + ) + if not success: + print(f"XeLaTeX pass {i+1} failed:") + print(stderr) + # Check if PDF was 
still created despite warnings + if not PDF_FILE.exists(): + return False + + if PDF_FILE.exists(): + size = PDF_FILE.stat().st_size + print(f"PDF generated: {PDF_FILE} ({size:,} bytes)") + return True + else: + print("PDF file not created") + return False + + +def compile_html() -> bool: + """Convert LaTeX to HTML using custom parser.""" + print(f"Converting {TEX_FILE.name} to HTML using custom parser...") + + success = extract_cv(TEX_FILE, HTML_FILE) + + if success and HTML_FILE.exists(): + size = HTML_FILE.stat().st_size + print(f"HTML generated: {HTML_FILE} ({size:,} bytes)") + return True + else: + print("HTML file not created") + return False + + +def cleanup_temp_files(): + """Remove temporary LaTeX build files.""" + print("Cleaning up temporary files...") + + cleaned = 0 + for ext in LATEX_TEMP_EXTENSIONS: + for f in DOCUMENTS_DIR.glob(f'*{ext}'): + try: + f.unlink() + cleaned += 1 + except Exception as e: + print(f"Could not remove {f}: {e}") + + print(f"Removed {cleaned} temporary files") + + +def validate_output() -> bool: + """Validate that PDF and HTML were generated correctly.""" + print("\nValidating output...") + + errors = [] + + # Check PDF + if not PDF_FILE.exists(): + errors.append("PDF file not found") + elif PDF_FILE.stat().st_size < 1000: + errors.append(f"PDF file too small ({PDF_FILE.stat().st_size} bytes)") + + # Check HTML + if not HTML_FILE.exists(): + errors.append("HTML file not found") + else: + with open(HTML_FILE, 'r', encoding='utf-8') as f: + html_content = f.read() + + # Check for key sections + required_sections = ['Employment', 'Education', 'Publications'] + for section in required_sections: + if section not in html_content: + errors.append(f"HTML missing section: {section}") + + # Check for download button + if 'cv-download-bar' not in html_content: + errors.append("HTML missing PDF download button") + + # Check for CSS link + if 'cv.css' not in html_content: + errors.append("HTML missing CSS link") + + if errors: + 
print("Validation errors:") + for error in errors: + print(f" - {error}") + return False + + print("Validation passed!") + print(f" PDF: {PDF_FILE} ({PDF_FILE.stat().st_size:,} bytes)") + print(f" HTML: {HTML_FILE} ({HTML_FILE.stat().st_size:,} bytes)") + return True + + +def build_cv() -> bool: + """Main build function.""" + print("=" * 60) + print("Building CV from LaTeX source") + print("=" * 60) + + # Check source file exists + if not TEX_FILE.exists(): + print(f"Error: Source file not found: {TEX_FILE}") + return False + + # Compile PDF + if not compile_pdf(): + print("Failed to compile PDF") + return False + + # Compile HTML using custom parser + if not compile_html(): + print("Failed to generate HTML") + return False + + # Clean up + cleanup_temp_files() + + # Validate + if not validate_output(): + return False + + print("\n" + "=" * 60) + print("CV build completed successfully!") + print("=" * 60) + return True + + +if __name__ == '__main__': + success = build_cv() + sys.exit(0 if success else 1) diff --git a/scripts/extract_cv.py b/scripts/extract_cv.py new file mode 100644 index 0000000..3642ee1 --- /dev/null +++ b/scripts/extract_cv.py @@ -0,0 +1,645 @@ +#!/usr/bin/env python3 +""" +Custom LaTeX to HTML converter for JRM_CV.tex. + +This parser handles the specific LaTeX constructs used in the CV +and produces HTML that matches the PDF formatting exactly. 
+""" + +import re +from pathlib import Path +from typing import Dict, List, Optional +from dataclasses import dataclass, field + + +@dataclass +class CVSection: + """Represents a section of the CV.""" + title: str + content: str + subsections: List['CVSection'] = field(default_factory=list) + + +def read_latex_file(filepath: Path) -> str: + """Read and return the content of a LaTeX file.""" + with open(filepath, 'r', encoding='utf-8') as f: + return f.read() + + +def extract_document_body(latex: str) -> str: + """Extract content between begin document and end document.""" + match = re.search(r'\\begin\{document\}(.+?)\\end\{document\}', latex, re.DOTALL) + if match: + return match.group(1) + return latex + + +def balanced_braces_extract(text: str, start: int) -> tuple: + """Extract content within balanced braces starting at position start. + Returns (content, end_position) or (None, -1) if not found.""" + if start >= len(text) or text[start] != '{': + return None, -1 + + depth = 0 + content_start = start + 1 + + for i in range(start, len(text)): + if text[i] == '{': + depth += 1 + elif text[i] == '}': + depth -= 1 + if depth == 0: + return text[content_start:i], i + 1 + + return None, -1 + + +def convert_command(text: str, cmd: str, html_start: str, html_end: str) -> str: + """Convert a LaTeX command to HTML tags.""" + pattern = '\\' + cmd + '{' + result = [] + i = 0 + + while i < len(text): + pos = text.find(pattern, i) + if pos == -1: + result.append(text[i:]) + break + + result.append(text[i:pos]) + content, end_pos = balanced_braces_extract(text, pos + len(pattern) - 1) + + if content is not None: + result.append(html_start) + result.append(content) + result.append(html_end) + i = end_pos + else: + result.append(text[pos:pos + len(pattern)]) + i = pos + len(pattern) + + return ''.join(result) + + +def convert_href(text: str) -> str: + """Convert href commands to HTML links.""" + result = [] + i = 0 + + while i < len(text): + match = re.search(r'\\href\{', 
text[i:]) + if not match: + result.append(text[i:]) + break + + pos = i + match.start() + result.append(text[i:pos]) + + # \href{ is 6 chars, { is at position 5 from match start + brace_pos = pos + 5 + + # Extract URL + url, url_end = balanced_braces_extract(text, brace_pos) + if url is None: + result.append(text[pos:pos + 6]) + i = pos + 6 + continue + + # Extract link text + link_text, text_end = balanced_braces_extract(text, url_end) + if link_text is None: + result.append(text[pos:url_end]) + i = url_end + continue + + result.append(f'{link_text}') + i = text_end + + return ''.join(result) + + +def remove_command_with_braces(text: str, cmd: str) -> str: + """Remove a LaTeX command and its braced argument, handling nested braces.""" + pattern = '\\' + cmd + '{' + result = [] + i = 0 + + while i < len(text): + pos = text.find(pattern, i) + if pos == -1: + result.append(text[i:]) + break + + result.append(text[i:pos]) + brace_pos = pos + len(pattern) - 1 + _, end_pos = balanced_braces_extract(text, brace_pos) + + if end_pos > 0: + i = end_pos + else: + result.append(text[pos:pos + len(pattern)]) + i = pos + len(pattern) + + return ''.join(result) + + +def convert_latex_formatting(text: str) -> str: + """Convert LaTeX formatting commands to HTML.""" + # Remove LaTeX comments (lines starting with %) + text = re.sub(r'^%.*$', '', text, flags=re.MULTILINE) + text = re.sub(r'(?', '') + text = convert_command(text, 'textit', '', '') + text = convert_command(text, 'emph', '', '') + text = convert_command(text, 'textsc', '', '') + text = convert_command(text, 'ul', '', '') + text = convert_command(text, 'texttt', '', '') + text = convert_command(text, 'textsuperscript', '', '') + + # Handle {\bf text} style (old LaTeX) + text = re.sub(r'\{\\bf\s+([^}]+)\}', r'\1', text) + text = re.sub(r'\{\\it\s+([^}]+)\}', r'\1', text) + text = re.sub(r'\{\\sc\s+([^}]+)\}', r'\1', text) + + # Handle special characters + replacements = [ + (r'\&', '&'), + (r'\_', '_'), + (r'\%', '%'), + 
(r'\$', '$'), + (r'\#', '#'), + (r'\-', ''), # discretionary hyphen + ('``', '"'), + ("''", '"'), + ('`', "'"), + ("'", "'"), + ('---', '—'), # em-dash (check before en-dash) + ('--', '–'), # en-dash + ('~', ' '), # non-breaking space + (r'\,', ' '), # thin space + (r'\"a', 'ä'), + (r'\"o', 'ö'), + (r'\"u', 'ü'), + (r'\"{a}', 'ä'), + (r'\"{o}', 'ö'), + (r'\"{u}', 'ü'), + ] + + for old, new in replacements: + text = text.replace(old, new) + + # Line breaks with spacing - \\[.1cm] etc adds extra space between blocks + text = re.sub(r'\\\\\[[\d.]+cm\]', '
\n', text) + text = text.replace(r'\\', '
\n') + + # Remove remaining LaTeX commands we don't need + text = re.sub(r'\\noindent\s*', '', text) + + # Math mode: $...$ - simple handling + text = re.sub(r'\$([^$]+)\$', r'\1', text) + + # Superscripts in math + text = re.sub(r'\^\\mathrm\{([^}]+)\}', r'\1', text) + text = re.sub(r'\^\{([^}]+)\}', r'\1', text) + + # Remove any remaining raw LaTeX commands that shouldn't appear + text = re.sub(r'\\begin\{center\}', '', text) + text = re.sub(r'\\end\{center\}', '', text) + text = re.sub(r'\{\\scriptsize\s+', '', text) + text = re.sub(r'\\today\}?\s*', '', text) + + return text + + +def parse_single_etaremune(list_content: str) -> List[str]: + """Parse a single etaremune list content and return items.""" + items = [] + # Split by item + parts = re.split(r'\\item\s*', list_content) + + for part in parts: + part = part.strip() + if part: + items.append(convert_latex_formatting(part)) + + return items + + +def parse_etaremune(content: str) -> List[str]: + """Parse etaremune environment (reverse-numbered list) and return items.""" + items = [] + + # Find ALL etaremune lists + for match in re.finditer(r'\\begin\{etaremune\}(.+?)\\end\{etaremune\}', content, re.DOTALL): + list_content = match.group(1) + items.extend(parse_single_etaremune(list_content)) + + return items + + +def parse_multicol_etaremune(content: str) -> List[str]: + """Parse multicol environment containing etaremune.""" + # Remove multicol wrapper + content = re.sub(r'\\begin\{multicols\}\{\d+\}', '', content) + content = re.sub(r'\\end\{multicols\}', '', content) + + return parse_etaremune(content) + + +def parse_labeled_lists(content: str) -> List[tuple]: + """Parse content with labeled lists like: + \\textit{Label}: + \\begin{etaremune}...\\end{etaremune} + + Returns list of (label, items) tuples. 
+ """ + labeled_lists = [] + + # Pattern to find labeled lists: \textit{Label}: followed by etaremune + # Also handle multicols wrapper + pattern = r'\\textit\{([^}]+)\}:\s*(?:\\begin\{multicols\}\{\d+\})?\s*\\begin\{etaremune\}(.+?)\\end\{etaremune\}\s*(?:\\end\{multicols\})?' + + for match in re.finditer(pattern, content, re.DOTALL): + label = match.group(1).strip() + list_content = match.group(2) + items = parse_single_etaremune(list_content) + labeled_lists.append((label, items)) + + return labeled_lists + + +def extract_header_info(body: str) -> Dict[str, str]: + """Extract header information (name, title, contact).""" + info = {} + + # Find header section (before first section) + header_match = re.search(r'^(.+?)\\section\*', body, re.DOTALL) + if header_match: + header = header_match.group(1) + + # Name - find the LARGE block and extract everything until the line break + name_match = re.search(r'\{\\LARGE\s*(.+?)\}\\\\', header, re.DOTALL) + if name_match: + name_raw = name_match.group(1).strip() + info['name'] = convert_latex_formatting(name_raw) + + # Find the position after the LARGE block (handles nested braces) + large_match = re.search(r'\{\\LARGE', header) + if large_match: + # Find the matching closing brace + start_pos = large_match.start() + _, end_pos = balanced_braces_extract(header, start_pos) + if end_pos > 0: + # Skip past the \\ after the closing brace + if header[end_pos:end_pos+2] == '\\\\': + end_pos += 2 + # Skip any spacing like [0.25cm] + spacing_match = re.match(r'\[[\d.]+cm\]', header[end_pos:]) + if spacing_match: + end_pos += spacing_match.end() + rest_of_header = header[end_pos:] + else: + rest_of_header = header + else: + rest_of_header = header + + # Split by line breaks, but track which ones have spacing + # Use a pattern that captures the spacing indicator + lines = [] + # Find all parts and their spacing + parts_with_spacing = re.split(r'(\\\\(?:\[[\d.]+cm\])?)', rest_of_header) + + current_text = '' + for i, part in 
enumerate(parts_with_spacing): + if re.match(r'\\\\(?:\[[\d.]+cm\])?', part): + # This is a line break - check if it has spacing + has_spacing = '[' in part and 'cm]' in part + if current_text.strip(): + # Remove LaTeX comments + text = re.sub(r'%.*$', '', current_text, flags=re.MULTILINE).strip() + if text and text not in ['}', '{', '']: + converted = convert_latex_formatting(text).strip() + if converted and converted not in ['}', '{', '']: + lines.append({'text': converted, 'space_after': has_spacing}) + current_text = '' + else: + current_text += part + + # Handle last part + if current_text.strip(): + text = re.sub(r'%.*$', '', current_text, flags=re.MULTILINE).strip() + if text and text not in ['}', '{', '']: + converted = convert_latex_formatting(text).strip() + if converted and converted not in ['}', '{', '']: + lines.append({'text': converted, 'space_after': False}) + + info['header_lines'] = lines + + return info + + +def remove_latex_comments(text: str) -> str: + """Remove LaTeX comments from text.""" + # Remove full-line comments + text = re.sub(r'^%.*$', '', text, flags=re.MULTILINE) + # Remove inline comments (but not escaped \%) + text = re.sub(r'(? List[CVSection]: + """Extract all sections from the CV.""" + sections = [] + + # Remove comments BEFORE splitting to avoid commented-out subsections + body = remove_latex_comments(body) + + # Split by section* or section + section_pattern = r'\\section\*?\{([^}]+)\}' + parts = re.split(section_pattern, body) + + # parts[0] is header, parts[1] is first section title, parts[2] is content, etc. 
+ if len(parts) > 1: + for i in range(1, len(parts), 2): + if i + 1 < len(parts): + title = parts[i].strip() + content = parts[i + 1].strip() + + # Check for subsections + subsection_pattern = r'\\subsection\*?\{([^}]+)\}' + sub_parts = re.split(subsection_pattern, content) + + if len(sub_parts) > 1: + section = CVSection(title=title, content=sub_parts[0].strip()) + for j in range(1, len(sub_parts), 2): + if j + 1 < len(sub_parts): + sub_title = sub_parts[j].strip() + sub_content = sub_parts[j + 1].strip() + section.subsections.append(CVSection(title=sub_title, content=sub_content)) + sections.append(section) + else: + sections.append(CVSection(title=title, content=content)) + + return sections + + +def render_list_items(items: List[str], reversed_numbering: bool = True) -> str: + """Render a list of items as HTML ordered list.""" + if not items: + return '' + + if reversed_numbering: + html = f'
    \n' + else: + html = '
      \n' + + for item in items: + item = item.strip() + # Clean up leading/trailing breaks + item = re.sub(r'^
      \s*', '', item) + item = re.sub(r'\s*
      \s*$', '', item) + html += f'
    1. {item}
    2. \n' + + html += '
    \n' + return html + + +def render_labeled_lists(labeled_lists: List[tuple], use_two_column: bool = False) -> str: + """Render labeled lists (like Mentorship section) to HTML.""" + html = '' + for label, items in labeled_lists: + html += f'

    {label}

    \n' + # Apply 2-column layout for Undergraduate Advisees or when explicitly requested + is_undergrad = 'undergraduate' in label.lower() + if (use_two_column or is_undergrad) and len(items) > 10: + html += f'
    {render_list_items(items)}
    \n' + else: + html += render_list_items(items) + return html + + +def extract_footnote(content: str) -> tuple: + """Extract blfootnote content from text. Returns (footnote_text, cleaned_content).""" + pattern = r'\\blfootnote\{' + match = re.search(pattern, content) + if not match: + return None, content + + brace_pos = match.end() - 1 + footnote_content, end_pos = balanced_braces_extract(content, brace_pos) + if footnote_content: + # Convert LaTeX formatting in footnote + footnote_html = convert_latex_formatting(footnote_content) + # Remove the footnote from content + cleaned = content[:match.start()] + content[end_pos:] + return footnote_html, cleaned + return None, content + + +def preprocess_content(content: str, extract_footnotes: bool = False) -> tuple: + """Preprocess content to handle problematic LaTeX commands before parsing. + + If extract_footnotes is True, returns (footnote, cleaned_content). + Otherwise returns just cleaned_content for backwards compatibility. + """ + # Remove comments first + content = re.sub(r'^%.*$', '', content, flags=re.MULTILINE) + content = re.sub(r'(? str: + """Render section content to HTML based on section type.""" + + # Extract footnote first (for display as note under section header) + footnote, content = preprocess_content(content, extract_footnotes=True) + + # Build HTML with optional footnote note + html_prefix = '' + if footnote: + html_prefix = f'

    {footnote}

    \n' + + # Check for labeled lists (like in Mentorship section) + labeled_lists = parse_labeled_lists(content) + if labeled_lists: + # Check if this is the undergraduate advisees (use two-column) + use_two_col = 'undergraduate' in section_title.lower() + return html_prefix + render_labeled_lists(labeled_lists, use_two_column=use_two_col) + + # Check for etaremune lists + if r'\begin{etaremune}' in content: + if r'\begin{multicols}' in content: + items = parse_multicol_etaremune(content) + if 'talks' in section_title.lower() or 'undergraduate' in section_title.lower(): + return html_prefix + f'
    {render_list_items(items)}
    ' + else: + return html_prefix + render_list_items(items) + else: + items = parse_etaremune(content) + # Check if should be 2 columns based on section + if 'undergraduate' in section_title.lower(): + return html_prefix + f'
    {render_list_items(items)}
    ' + return html_prefix + render_list_items(items) + + # For regular content, convert formatting + content = convert_latex_formatting(content) + + # Split into paragraphs + paragraphs = re.split(r'\n\s*\n', content) + + html = html_prefix + for para in paragraphs: + para = para.strip() + if para: + # Skip empty-looking content or raw LaTeX remnants + if para in ['}', '{', ''] or para.startswith('\\'): + continue + if not para.startswith('<'): + html += f'

    {para}

    \n' + else: + html += f'{para}\n' + + return html + + +def generate_html(tex_content: str) -> str: + """Generate complete HTML from LaTeX content.""" + body = extract_document_body(tex_content) + header_info = extract_header_info(body) + sections = extract_sections(body) + + html_parts = [] + + # HTML header + html_parts.append(''' + + + + + Jeremy R. Manning, Ph.D. - Curriculum Vitae + + + +
    +
    + Curriculum Vitae + Download PDF +
    +
    + +
    +''') + + # Header section + html_parts.append('
    \n') + if 'name' in header_info: + html_parts.append(f'

    {header_info["name"]}

    \n') + if 'header_lines' in header_info: + html_parts.append('
    \n') + for line_info in header_info['header_lines']: + if isinstance(line_info, dict): + text = line_info['text'] + space_class = ' class="space-after"' if line_info.get('space_after') else '' + if text.strip(): + html_parts.append(f' {text}

    \n') + elif line_info.strip(): # Backwards compatibility + html_parts.append(f'

    {line_info}

    \n') + html_parts.append('
    \n') + html_parts.append('
    \n\n') + + # Sections + for section in sections: + section_id = section.title.lower() + section_id = re.sub(r'[^a-z0-9]+', '-', section_id).strip('-') + html_parts.append(f'
    \n') + html_parts.append(f'

    {section.title}

    \n') + + if section.subsections: + if section.content.strip(): + rendered = render_section_content(section.content, section.title) + html_parts.append(f' {rendered}\n') + + for subsection in section.subsections: + rendered = render_section_content(subsection.content, subsection.title) + # Skip empty subsections + if not rendered.strip(): + continue + sub_id = re.sub(r'[^a-z0-9]+', '-', subsection.title.lower()).strip('-') + html_parts.append(f'
    \n') + html_parts.append(f'

    {subsection.title}

    \n') + html_parts.append(f' {rendered}\n') + html_parts.append('
    \n') + else: + rendered = render_section_content(section.content, section.title) + html_parts.append(f' {rendered}\n') + + html_parts.append('
    \n\n') + + # Footer + html_parts.append('''
    +

    Last updated:

    + +
    +
    + + +''') + + return ''.join(html_parts) + + +def extract_cv(input_path: Path, output_path: Path) -> bool: + """Main extraction function.""" + try: + tex_content = read_latex_file(input_path) + html_content = generate_html(tex_content) + + with open(output_path, 'w', encoding='utf-8') as f: + f.write(html_content) + + return True + except Exception as e: + print(f"Error extracting CV: {e}") + import traceback + traceback.print_exc() + return False + + +if __name__ == '__main__': + project_root = Path(__file__).parent.parent + input_file = project_root / 'documents' / 'JRM_CV.tex' + output_file = project_root / 'documents' / 'JRM_CV.html' + + if extract_cv(input_file, output_file): + print(f"Successfully generated {output_file}") + else: + print("Failed to generate HTML") diff --git a/tests/test_build_cv.py b/tests/test_build_cv.py new file mode 100644 index 0000000..12b8a0b --- /dev/null +++ b/tests/test_build_cv.py @@ -0,0 +1,856 @@ +"""Tests for CV build system using REAL files - no mocks. + +All tests use real file operations, real PDF compilation, and real HTML generation +to verify the actual behavior of the CV build scripts. + +IMPORTANT: NO MOCKS OR SIMULATIONS - all tests use real files and real operations. 
+""" +import pytest +from pathlib import Path +import tempfile +import subprocess +import re + +from extract_cv import ( + read_latex_file, + extract_document_body, + balanced_braces_extract, + convert_command, + convert_href, + convert_latex_formatting, + parse_etaremune, + extract_header_info, + extract_sections, + generate_html, + extract_cv, +) + +from build_cv import ( + run_command, + compile_pdf, + compile_html, + cleanup_temp_files, + validate_output, + build_cv, + PDF_FILE, + TEX_FILE, + HTML_FILE, +) + + +class TestLatexConversion: + """Test LaTeX conversion functions.""" + + def test_convert_textbf(self): + """Test that \\textbf is converted to .""" + text = r'\textbf{bold text}' + result = convert_latex_formatting(text) + assert 'bold text' in result + + def test_convert_textbf_nested(self): + """Test nested braces in \\textbf.""" + text = r'\textbf{text with {nested} braces}' + result = convert_latex_formatting(text) + assert 'text with {nested} braces' in result + + def test_convert_href_basic(self): + """Test basic \\href conversion.""" + text = r'\href{https://example.com}{Link Text}' + result = convert_latex_formatting(text) + assert 'Link Text' in result + + def test_convert_href_in_sentence(self): + """Test \\href within a sentence.""" + text = r'Visit \href{https://example.com}{our site} for more.' + result = convert_latex_formatting(text) + assert 'Visit our site for more.' 
in result + + def test_convert_multiple_hrefs(self): + """Test multiple \\href commands.""" + text = r'\href{http://a.com}{Link A} and \href{http://b.com}{Link B}' + result = convert_latex_formatting(text) + assert 'Link A' in result + assert 'Link B' in result + + def test_convert_textit(self): + """Test \\textit conversion.""" + text = r'\textit{italic text}' + result = convert_latex_formatting(text) + assert 'italic text' in result + + def test_convert_emph(self): + """Test \\emph conversion.""" + text = r'\emph{emphasized}' + result = convert_latex_formatting(text) + assert 'emphasized' in result + + def test_convert_textsc(self): + """Test \\textsc conversion.""" + text = r'\textsc{Small Caps}' + result = convert_latex_formatting(text) + assert 'Small Caps' in result + + def test_convert_special_ampersand(self): + """Test \\& conversion.""" + text = r'Smith \& Jones' + result = convert_latex_formatting(text) + assert 'Smith & Jones' in result + + def test_convert_special_underscore(self): + """Test \\_ conversion.""" + text = r'file\_name' + result = convert_latex_formatting(text) + assert 'file_name' in result + + def test_convert_special_percent(self): + """Test \\% conversion.""" + text = r'50\% off' + result = convert_latex_formatting(text) + assert '50% off' in result + + def test_convert_special_dollar(self): + """Test \\$ conversion.""" + text = r'\$100' + result = convert_latex_formatting(text) + assert '$100' in result + + def test_convert_em_dash(self): + """Test --- to em-dash conversion.""" + text = 'Hello---world' + result = convert_latex_formatting(text) + assert 'Hello—world' in result + + def test_convert_en_dash(self): + """Test -- to en-dash conversion.""" + text = '2020--2025' + result = convert_latex_formatting(text) + assert '2020–2025' in result + + def test_convert_quotes(self): + """Test quote conversion.""" + text = "``Hello'' and `world'" + result = convert_latex_formatting(text) + assert '"Hello"' in result + assert "'world'" in 
result + + def test_convert_linebreak(self): + """Test \\\\ to
    conversion.""" + text = r'Line 1\\Line 2' + result = convert_latex_formatting(text) + assert '
    ' in result + + def test_convert_combined_formatting(self): + """Test multiple formatting commands together.""" + text = r'\textbf{Bold} and \textit{italic} and \href{http://example.com}{link}' + result = convert_latex_formatting(text) + assert 'Bold' in result + assert 'italic' in result + assert 'link' in result + + def test_balanced_braces_extract(self): + """Test balanced braces extraction.""" + text = '{simple content}' + content, end = balanced_braces_extract(text, 0) + assert content == 'simple content' + assert end == len(text) + + def test_balanced_braces_nested(self): + """Test nested braces extraction.""" + text = '{outer {inner} text}' + content, end = balanced_braces_extract(text, 0) + assert content == 'outer {inner} text' + + def test_balanced_braces_multiple_levels(self): + """Test deeply nested braces.""" + text = '{a {b {c} d} e}' + content, end = balanced_braces_extract(text, 0) + assert content == 'a {b {c} d} e' + + def test_convert_command_basic(self): + """Test convert_command function.""" + text = r'\textbf{bold}' + result = convert_command(text, 'textbf', '', '') + assert result == 'bold' + + def test_convert_href_function(self): + """Test convert_href function.""" + text = r'\href{http://example.com}{Link}' + result = convert_href(text) + assert 'Link' in result + + +class TestParseEtaremune: + """Test etaremune list parsing.""" + + def test_parse_simple_list(self): + """Test parsing a simple etaremune list.""" + content = r''' +\begin{etaremune} +\item First item +\item Second item +\item Third item +\end{etaremune} +''' + items = parse_etaremune(content) + assert len(items) == 3 + assert 'First item' in items[0] + assert 'Second item' in items[1] + assert 'Third item' in items[2] + + def test_parse_list_with_formatting(self): + """Test parsing list with LaTeX formatting.""" + content = r''' +\begin{etaremune} +\item \textbf{Bold} text +\item \textit{Italic} text +\item \href{http://example.com}{Link} +\end{etaremune} +''' + items = 
parse_etaremune(content) + assert len(items) == 3 + assert 'Bold' in items[0] + assert 'Italic' in items[1] + assert 'Link' in items[2] + + def test_parse_empty_list(self): + """Test parsing content without etaremune.""" + content = 'No list here' + items = parse_etaremune(content) + assert len(items) == 0 + + def test_parse_multiline_items(self): + """Test parsing items that span multiple lines.""" + content = r''' +\begin{etaremune} +\item First line +continues here +\item Second item +\end{etaremune} +''' + items = parse_etaremune(content) + assert len(items) == 2 + assert 'First line' in items[0] + assert 'continues' in items[0] + + +class TestExtractHeaderInfo: + """Test header extraction.""" + + def test_extract_name(self): + """Test extracting name from header.""" + body = r''' +{\LARGE Jeremy R. Manning, \textsc{Ph.D.}}\\ +Director, Lab\\ +Department\\ +\section*{Employment} +Content here +''' + info = extract_header_info(body) + assert 'name' in info + assert 'Jeremy R. Manning' in info['name'] + + def test_extract_header_lines(self): + """Test extracting contact info lines.""" + body = r''' +{\LARGE Name}\\[0.25cm] +Department\\ +Email: \href{mailto:test@example.com}{test@example.com}\\ +\section*{Employment} +''' + info = extract_header_info(body) + assert 'header_lines' in info + assert len(info['header_lines']) > 0 + # header_lines is a list of dicts with 'text' and 'space_after' keys + texts = [line['text'] if isinstance(line, dict) else line for line in info['header_lines']] + assert any('Department' in text for text in texts) + + +class TestExtractSections: + """Test section extraction.""" + + def test_extract_single_section(self): + """Test extracting a single section.""" + body = r''' +Header content +\section*{Employment} +Employment details here +''' + sections = extract_sections(body) + assert len(sections) >= 1 + assert any(s.title == 'Employment' for s in sections) + + def test_extract_multiple_sections(self): + """Test extracting multiple 
sections.""" + body = r''' +\section*{Employment} +Job info +\section*{Education} +Degree info +\section*{Publications} +Paper list +''' + sections = extract_sections(body) + assert len(sections) >= 3 + titles = [s.title for s in sections] + assert 'Employment' in titles + assert 'Education' in titles + assert 'Publications' in titles + + def test_extract_subsections(self): + """Test extracting subsections.""" + body = r''' +\section*{Main Section} +Main content +\subsection*{Subsection 1} +Sub content 1 +\subsection*{Subsection 2} +Sub content 2 +''' + sections = extract_sections(body) + main_section = next((s for s in sections if s.title == 'Main Section'), None) + assert main_section is not None + assert len(main_section.subsections) >= 2 + + +class TestHTMLGeneration: + """Test HTML generation.""" + + def test_generate_basic_html(self): + """Test generating basic HTML structure.""" + tex_content = r''' +\documentclass{article} +\begin{document} +{\LARGE Test Name}\\ +Test Department\\ +\section*{Section} +Content here +\end{document} +''' + html = generate_html(tex_content) + + # Check structure + assert '' in html + assert '' in html + assert '' in html + assert '' in html + assert '' in html + assert '' in html + + def test_html_has_download_button(self): + """Test that HTML includes PDF download button.""" + tex_content = r''' +\documentclass{article} +\begin{document} +{\LARGE Test Name}\\ +\section*{Section} +Content +\end{document} +''' + html = generate_html(tex_content) + assert 'cv-download-bar' in html + assert 'Download PDF' in html + + def test_html_has_css_link(self): + """Test that HTML includes CSS link.""" + tex_content = r''' +\documentclass{article} +\begin{document} +{\LARGE Test Name}\\ +\section*{Section} +Content +\end{document} +''' + html = generate_html(tex_content) + assert 'cv.css' in html + + def test_html_includes_sections(self): + """Test that HTML includes section content.""" + tex_content = r''' +\documentclass{article} 
+\begin{document} +{\LARGE Test Name}\\ +\section*{Employment} +Professor +\section*{Education} +Ph.D. +\end{document} +''' + html = generate_html(tex_content) + assert 'Employment' in html + assert 'Education' in html + assert 'Professor' in html + + def test_html_preserves_links(self): + """Test that links are preserved in HTML.""" + tex_content = r''' +\documentclass{article} +\begin{document} +{\LARGE Test Name}\\ +\href{mailto:test@example.com}{test@example.com}\\ +\section*{Section} +Visit \href{http://example.com}{our website} +\end{document} +''' + html = generate_html(tex_content) + assert 'mailto:test@example.com' in html + assert 'http://example.com' in html + assert '' in content + assert 'Jeremy R. Manning' in content + assert 'Employment' in content + + +class TestPDFCompilation: + """Test PDF compilation from LaTeX.""" + + def test_xelatex_available(self): + """Test that xelatex is available on the system.""" + result = subprocess.run(['which', 'xelatex'], capture_output=True) + if result.returncode != 0: + pytest.skip("xelatex not available on this system") + + def test_compile_minimal_pdf(self): + """Test compiling a minimal LaTeX document to PDF.""" + # Check xelatex availability + result = subprocess.run(['which', 'xelatex'], capture_output=True) + if result.returncode != 0: + pytest.skip("xelatex not available") + + with tempfile.TemporaryDirectory() as td: + temp_dir = Path(td) + + # Create minimal LaTeX file + tex_file = temp_dir / 'test.tex' + tex_file.write_text(r''' +\documentclass{article} +\begin{document} +Hello, World! 
+\end{document} +''') + + # Compile to PDF + cmd = ['xelatex', '-interaction=nonstopmode', 'test.tex'] + subprocess.run(cmd, cwd=temp_dir, capture_output=True) + + pdf_file = temp_dir / 'test.pdf' + assert pdf_file.exists(), "PDF was not created" + + # Check file size + size = pdf_file.stat().st_size + assert size > 1000, f"PDF too small: {size} bytes" + + def test_pdf_is_valid(self): + """Test that generated PDF starts with PDF magic bytes.""" + # Check xelatex availability + result = subprocess.run(['which', 'xelatex'], capture_output=True) + if result.returncode != 0: + pytest.skip("xelatex not available") + + with tempfile.TemporaryDirectory() as td: + temp_dir = Path(td) + + tex_file = temp_dir / 'test.tex' + tex_file.write_text(r''' +\documentclass{article} +\begin{document} +Test PDF content +\end{document} +''') + + cmd = ['xelatex', '-interaction=nonstopmode', 'test.tex'] + subprocess.run(cmd, cwd=temp_dir, capture_output=True) + + pdf_file = temp_dir / 'test.pdf' + + # Read first few bytes + with open(pdf_file, 'rb') as f: + header = f.read(5) + + assert header == b'%PDF-', f"Invalid PDF header: {header}" + + def test_pdf_reasonable_size(self): + """Test that PDF has reasonable file size (> 50KB for real CV).""" + # This test will use the actual CV file if it exists + if not TEX_FILE.exists(): + pytest.skip("JRM_CV.tex not found") + + # Check xelatex availability + result = subprocess.run(['which', 'xelatex'], capture_output=True) + if result.returncode != 0: + pytest.skip("xelatex not available") + + with tempfile.TemporaryDirectory() as td: + temp_dir = Path(td) + + # Copy CV to temp dir + import shutil + temp_tex = temp_dir / 'JRM_CV.tex' + shutil.copy(TEX_FILE, temp_tex) + + # Compile (may need fonts, so allow it to fail gracefully) + cmd = ['xelatex', '-interaction=nonstopmode', 'JRM_CV.tex'] + result = subprocess.run(cmd, cwd=temp_dir, capture_output=True) + + pdf_file = temp_dir / 'JRM_CV.pdf' + + if pdf_file.exists(): + size = 
pdf_file.stat().st_size + assert size > 50000, f"PDF too small: {size} bytes (expected > 50KB)" + + +class TestContentValidation: + """Test validation of generated content.""" + + def test_html_contains_name(self): + """Test that HTML contains Jeremy R. Manning.""" + if not TEX_FILE.exists(): + pytest.skip("JRM_CV.tex not found") + + with tempfile.TemporaryDirectory() as td: + temp_dir = Path(td) + html_file = temp_dir / 'cv.html' + + success = extract_cv(TEX_FILE, html_file) + assert success + + content = html_file.read_text() + assert 'Jeremy R. Manning' in content + + def test_html_contains_required_sections(self): + """Test that HTML contains required sections.""" + if not TEX_FILE.exists(): + pytest.skip("JRM_CV.tex not found") + + with tempfile.TemporaryDirectory() as td: + temp_dir = Path(td) + html_file = temp_dir / 'cv.html' + + success = extract_cv(TEX_FILE, html_file) + assert success + + content = html_file.read_text() + + # Required sections + required_sections = ['Employment', 'Education', 'Publications'] + for section in required_sections: + assert section in content, f"Missing section: {section}" + + def test_html_contains_email(self): + """Test that HTML contains contact email.""" + if not TEX_FILE.exists(): + pytest.skip("JRM_CV.tex not found") + + with tempfile.TemporaryDirectory() as td: + temp_dir = Path(td) + html_file = temp_dir / 'cv.html' + + success = extract_cv(TEX_FILE, html_file) + assert success + + content = html_file.read_text() + + # Should have email link + assert 'mailto:' in content + assert 'dartmouth.edu' in content + + def test_html_links_are_valid(self): + """Test that all links in HTML are well-formed URLs.""" + if not TEX_FILE.exists(): + pytest.skip("JRM_CV.tex not found") + + with tempfile.TemporaryDirectory() as td: + temp_dir = Path(td) + html_file = temp_dir / 'cv.html' + + success = extract_cv(TEX_FILE, html_file) + assert success + + content = html_file.read_text() + + # Find all href attributes + href_pattern = 
r'href="([^"]+)"' + hrefs = re.findall(href_pattern, content) + + assert len(hrefs) > 0, "No links found in HTML" + + # Check that each href is a valid URL or anchor + for href in hrefs: + # Should be http(s), mailto, tel, relative path, or anchor + assert (href.startswith('http://') or + href.startswith('https://') or + href.startswith('mailto:') or + href.startswith('tel:') or + href.startswith('#') or + href.endswith('.pdf') or + href.endswith('.html') or + '/' in href or # Relative paths + '.' in href), f"Invalid href: {href}" + + def test_html_has_proper_structure(self): + """Test that HTML has proper document structure.""" + with tempfile.TemporaryDirectory() as td: + temp_dir = Path(td) + tex_file = temp_dir / 'test.tex' + tex_file.write_text(r''' +\documentclass{article} +\begin{document} +{\LARGE Test Name}\\ +\section*{Section} +Content +\end{document} +''') + + html_file = temp_dir / 'test.html' + success = extract_cv(tex_file, html_file) + assert success + + content = html_file.read_text() + + # Check structure + assert content.startswith(''), "Missing DOCTYPE" + assert '' in content + assert '' in content + assert '' in content + assert '' in content + assert '' in content + assert '' in content + assert '' in content + assert '' in content + assert '' in content + + +class TestBuildCVIntegration: + """Integration tests for the full build process.""" + + def test_run_command(self): + """Test run_command function.""" + success, stdout, stderr = run_command(['echo', 'test']) + assert success + assert 'test' in stdout + + def test_run_command_timeout(self): + """Test run_command with timeout.""" + success, stdout, stderr = run_command(['sleep', '10'], timeout=1) + assert not success + assert 'timeout' in stderr.lower() or 'timed out' in stderr.lower() + + def test_cleanup_temp_files(self): + """Test cleanup of temporary LaTeX files.""" + with tempfile.TemporaryDirectory() as td: + temp_dir = Path(td) + + # Create fake temp files + extensions = ['.aux', 
'.log', '.out', '.synctex.gz'] + for ext in extensions: + (temp_dir / f'test{ext}').touch() + + # Monkey-patch DOCUMENTS_DIR + import build_cv + old_dir = build_cv.DOCUMENTS_DIR + build_cv.DOCUMENTS_DIR = temp_dir + + try: + cleanup_temp_files() + + # Check that files were removed + for ext in extensions: + assert not (temp_dir / f'test{ext}').exists() + finally: + build_cv.DOCUMENTS_DIR = old_dir + + def test_full_cv_build_if_files_exist(self): + """Test full CV build process if source files exist.""" + if not TEX_FILE.exists(): + pytest.skip("JRM_CV.tex not found") + + # Check xelatex availability + result = subprocess.run(['which', 'xelatex'], capture_output=True) + if result.returncode != 0: + pytest.skip("xelatex not available") + + # Save original files if they exist + import shutil + backup_pdf = None + backup_html = None + + if PDF_FILE.exists(): + backup_pdf = PDF_FILE.with_suffix('.pdf.backup') + shutil.copy(PDF_FILE, backup_pdf) + + if HTML_FILE.exists(): + backup_html = HTML_FILE.with_suffix('.html.backup') + shutil.copy(HTML_FILE, backup_html) + + try: + # Run build + success = build_cv() + + if success: + # Verify outputs + assert PDF_FILE.exists(), "PDF not created" + assert HTML_FILE.exists(), "HTML not created" + + # Check PDF is valid + with open(PDF_FILE, 'rb') as f: + header = f.read(5) + assert header == b'%PDF-', "Invalid PDF" + + # Check HTML has content + content = HTML_FILE.read_text() + assert len(content) > 10000, "HTML too small" + assert 'Jeremy R. 
Manning' in content + + finally: + # Restore backups + if backup_pdf and backup_pdf.exists(): + shutil.move(backup_pdf, PDF_FILE) + if backup_html and backup_html.exists(): + shutil.move(backup_html, HTML_FILE) + + +class TestRealCVContent: + """Tests using the actual CV file.""" + + def test_actual_cv_has_employment(self): + """Test that actual CV has Employment section.""" + if not TEX_FILE.exists(): + pytest.skip("JRM_CV.tex not found") + + content = read_latex_file(TEX_FILE) + assert r'\section*{Employment}' in content or r'\section{Employment}' in content + + def test_actual_cv_has_education(self): + """Test that actual CV has Education section.""" + if not TEX_FILE.exists(): + pytest.skip("JRM_CV.tex not found") + + content = read_latex_file(TEX_FILE) + assert r'\section*{Education}' in content or r'\section{Education}' in content + + def test_actual_cv_has_publications(self): + """Test that actual CV has Publications section.""" + if not TEX_FILE.exists(): + pytest.skip("JRM_CV.tex not found") + + content = read_latex_file(TEX_FILE) + assert r'\section*{Publications}' in content or r'\section{Publications}' in content + + def test_extract_cv_from_actual_file(self): + """Test extracting HTML from the actual CV LaTeX file.""" + if not TEX_FILE.exists(): + pytest.skip("JRM_CV.tex not found") + + with tempfile.TemporaryDirectory() as td: + temp_dir = Path(td) + html_file = temp_dir / 'JRM_CV.html' + + success = extract_cv(TEX_FILE, html_file) + assert success, "Failed to extract CV" + assert html_file.exists(), "HTML file not created" + + content = html_file.read_text() + + # Should be substantial (at least 10KB) + assert len(content) > 10000, f"HTML too small: {len(content)} bytes" + + # Should have structure + assert '' in content + assert '' in content + + # Should have key content + assert 'Jeremy R. 
Manning' in content + assert 'Employment' in content + assert 'Education' in content + assert 'Publications' in content + + # Should have download button + assert 'cv-download-bar' in content + assert 'Download PDF' in content + + +class TestEdgeCases: + """Test edge cases and error handling.""" + + def test_extract_cv_nonexistent_file(self): + """Test extracting from nonexistent file.""" + with tempfile.TemporaryDirectory() as td: + temp_dir = Path(td) + nonexistent = temp_dir / 'nonexistent.tex' + output = temp_dir / 'output.html' + + # Should handle gracefully + success = extract_cv(nonexistent, output) + assert not success + + def test_balanced_braces_no_closing(self): + """Test balanced_braces_extract with no closing brace.""" + text = '{unclosed' + content, end = balanced_braces_extract(text, 0) + assert content is None + assert end == -1 + + def test_balanced_braces_not_starting_with_brace(self): + """Test balanced_braces_extract when not starting with brace.""" + text = 'no brace here' + content, end = balanced_braces_extract(text, 0) + assert content is None + assert end == -1 + + def test_convert_latex_empty_string(self): + """Test converting empty string.""" + result = convert_latex_formatting('') + assert result == '' + + def test_parse_etaremune_malformed(self): + """Test parsing malformed etaremune.""" + content = r'\begin{etaremune}' # No end tag + items = parse_etaremune(content) + assert len(items) == 0 + + def test_extract_document_body_no_document(self): + """Test extracting body when no document environment.""" + latex = 'Just some text' + body = extract_document_body(latex) + assert body == latex # Returns original if no document env found + + def test_generate_html_minimal_document(self): + """Test generating HTML from minimal document.""" + tex = r''' +\documentclass{article} +\begin{document} +{\LARGE Test Name}\\ +\section*{Test Section} +Minimal content here +\end{document} +''' + html = generate_html(tex) + assert '' in html + # Content 
appears in sections, check for section title instead + assert 'Test Section' in html or 'Test Name' in html