eslint
carnetdethese committed Apr 18, 2024
1 parent 1e46531 commit 3caf783
Showing 3 changed files with 11 additions and 16 deletions.
2 changes: 1 addition & 1 deletion Dalloz Bibliotheque.js
@@ -15,7 +15,7 @@
 /*
 ***** BEGIN LICENSE BLOCK *****
-Copyright © 2024 YOUR_NAME <- TODO
+Copyright © 2024 Alexandre Mimms
 This file is part of Zotero.
10 changes: 3 additions & 7 deletions Jus Politicum.js
@@ -76,7 +76,7 @@ async function doWeb(doc, url) {

 async function scrape(doc, url = doc.location.href) {
 const abstract = ZU.trimInternal(text(doc, "#content"));
-const titre = ZU.trimInternal(text(doc, "h2"))
+const titre = ZU.trimInternal(text(doc, "h2"));
 const numero = text(doc, ".release-title .num").replace("N°", "");
 const linkURL = doc.querySelectorAll(".documentsAssocies a")[0].href;
 const auteurs = text(doc, ".article-author").split(", ");
@@ -90,12 +90,13 @@ async function scrape(doc, url = doc.location.href) {
 lastName: auteurNames[1],
 creatorType: "author",
 fieldMode: true
-})
+});
 }

 newItem.title = titre;
 newItem.issue = numero;
 newItem.abstractNote = abstract;
+newItem.url = url;

 newItem.attachments = [{
 url: linkURL,
@@ -105,8 +106,3 @@ async function scrape(doc, url = doc.location.href) {

 newItem.complete();
 }
-
-/** BEGIN TEST CASES **/
-var testCases = [
-]
-/** END TEST CASES **/
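
(Aside, not part of this commit.) The scrape() hunk above fills creators by splitting each name on a single space and assigning firstName/lastName by position. Zotero's bundled utilities also provide ZU.cleanAuthor(), which performs that split and tags the creator type; a minimal sketch of that alternative, reusing the auteurs array from the hunk above:

for (let auteur of auteurs) {
	// ZU.cleanAuthor() turns a plain "First Last" string into the
	// firstName/lastName fields Zotero expects for a two-field creator.
	newItem.creators.push(ZU.cleanAuthor(auteur, "author"));
}
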
15 changes: 7 additions & 8 deletions Lextenso.js
@@ -15,7 +15,7 @@
 /*
 ***** BEGIN LICENSE BLOCK *****
-Copyright © 2022 Alexandre Mimms
+Copyright © 2024 Alexandre Mimms
 This file is part of Zotero.
@@ -51,7 +51,7 @@ async function scrapeJournalArticle(doc, url) {
 newItem.title = titre;

 for (let auteur of auteurs) {
-auteurNames = auteur.innerText.split(" ");
+const auteurNames = auteur.innerText.split(" ");
 newItem.creators.push({
 firstName: auteurNames[0],
 lastName: auteurNames[1],
@@ -72,7 +72,7 @@

 async function scrapeBook(doc, url) {
 // weirdly enough no real information is displayed on the book summary page, but
-// some info, like ISBN, is shown on individual pages. 
+// some info, like ISBN, is shown on individual pages.
 // So, we get the first url to one of those individual pages, then request it so we
 // can fetch the information.
 // I did not yet find a way to fetch the number of page or edition.
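
(Context, not code from this diff.) A minimal sketch of the approach the comment above describes — follow the first link to an individual page, request it, and read the ISBN there — with an assumed link selector and an assumed ".isbn" node; the real scrapeBook() goes on to build the full item from that page.

async function followFirstPage(doc) {
	// Hypothetical selector for the first link to an individual page of the book.
	let firstPageURL = attr(doc, '.table-of-contents a', 'href');
	// Request that page, then read the ISBN from it (assumed ".isbn" node).
	let pageDoc = await requestDocument(firstPageURL);
	return ZU.cleanISBN(text(pageDoc, '.isbn'));
}
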
@@ -97,7 +97,7 @@ async function scrapeBook(doc, url) {
 newItem.title = titre;

 for (let auteur of auteurs) {
-auteurNames = auteur.innerText.split(" ");
+const auteurNames = auteur.innerText.split(" ");
 newItem.creators.push({
 firstName: auteurNames[0],
 lastName: auteurNames[1],
@@ -132,12 +132,12 @@ function detectWeb(doc, url) {
 function getSearchResults(doc, checkOnly) {
 var items = {};
 var found = false;
-// TODO: adjust the CSS selector
+
 var rows = doc.querySelectorAll('h2 > a.title[href*="/article/"]');
 for (let row of rows) {
-// TODO: check and maybe adjust
+
 let href = row.href;
-// TODO: check and maybe adjust
+
 let title = ZU.trimInternal(row.textContent);
 if (!href || !title) continue;
 if (checkOnly) return true;
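
(For orientation, not code from this commit; Lextenso.js's actual doWeb() is outside the hunks shown.) The checkOnly flag above follows the standard Zotero translator pattern: detectWeb() typically calls getSearchResults(doc, true) as a cheap probe, while doWeb() passes false to get the { url: title } map for the item picker, roughly:

async function doWeb(doc, url) {
	if (detectWeb(doc, url) == 'multiple') {
		let items = await Zotero.selectItems(getSearchResults(doc, false));
		if (!items) return;
		for (let url of Object.keys(items)) {
			// Fetch each selected result and scrape it.
			await scrape(await requestDocument(url));
		}
	}
	else {
		await scrape(doc, url);
	}
}
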
@@ -168,7 +168,6 @@ async function scrape(doc, url = doc.location.href, docType) {
 else if (docType == "book") {
 scrapeBook(doc, url);
 }
-
 }

 /** BEGIN TEST CASES **/