diff --git a/packages/components/nodes/documentloaders/Cheerio/Cheerio.ts b/packages/components/nodes/documentloaders/Cheerio/Cheerio.ts
index 9e1135059ab..b93a8685afd 100644
--- a/packages/components/nodes/documentloaders/Cheerio/Cheerio.ts
+++ b/packages/components/nodes/documentloaders/Cheerio/Cheerio.ts
@@ -2,7 +2,7 @@ import { INode, INodeData, INodeParams } from '../../../src/Interface'
 import { TextSplitter } from 'langchain/text_splitter'
 import { CheerioWebBaseLoader } from 'langchain/document_loaders/web/cheerio'
 import { test } from 'linkifyjs'
-import { getAvailableURLs } from '../../../src'
+import { webCrawl, xmlScrape } from '../../../src'
 
 class Cheerio_DocumentLoaders implements INode {
     label: string
@@ -35,19 +35,34 @@ class Cheerio_DocumentLoaders implements INode {
                 optional: true
             },
             {
-                label: 'Web Scrap for Relative Links',
-                name: 'webScrap',
-                type: 'boolean',
+                label: 'Get Relative Links Method',
+                name: 'relativeLinksMethod',
+                type: 'options',
+                description: 'Select a method to retrieve relative links',
+                options: [
+                    {
+                        label: 'Web Crawl',
+                        name: 'webCrawl',
+                        description: 'Crawl relative links from HTML URL'
+                    },
+                    {
+                        label: 'Scrape XML Sitemap',
+                        name: 'scrapeXMLSitemap',
+                        description: 'Scrape relative links from XML sitemap URL'
+                    }
+                ],
                 optional: true,
                 additionalParams: true
             },
             {
-                label: 'Web Scrap Links Limit',
+                label: 'Get Relative Links Limit',
                 name: 'limit',
                 type: 'number',
-                default: 10,
                 optional: true,
-                additionalParams: true
+                additionalParams: true,
+                description:
+                    'Only used when "Get Relative Links Method" is selected. Set 0 to retrieve all relative links, default limit is 10.',
+                warning: `Retrieving all links might take a long time, and all links will be upserted again if the flow's state changes (e.g. different URL, chunk size, etc.)`
             },
             {
                 label: 'Metadata',
@@ -62,7 +77,7 @@ class Cheerio_DocumentLoaders implements INode {
     async init(nodeData: INodeData): Promise<any> {
         const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
         const metadata = nodeData.inputs?.metadata
-        const webScrap = nodeData.inputs?.webScrap as boolean
+        const relativeLinksMethod = nodeData.inputs?.relativeLinksMethod as string
         let limit = nodeData.inputs?.limit as string
         let url = nodeData.inputs?.url as string
 
@@ -71,25 +86,34 @@
             throw new Error('Invalid URL')
         }
 
-        const cheerioLoader = async (url: string): Promise<any> => {
-            let docs = []
-            const loader = new CheerioWebBaseLoader(url)
-            if (textSplitter) {
-                docs = await loader.loadAndSplit(textSplitter)
-            } else {
-                docs = await loader.load()
+        async function cheerioLoader(url: string): Promise<any> {
+            let docs = []
+            try {
+                const loader = new CheerioWebBaseLoader(url)
+                if (textSplitter) {
+                    docs = await loader.loadAndSplit(textSplitter)
+                } else {
+                    docs = await loader.load()
+                }
+            } catch (err) {
+                if (process.env.DEBUG === 'true') console.error(`error in CheerioWebBaseLoader: ${err.message}, on page: ${url}`)
             }
             return docs
         }
 
-        let availableUrls: string[]
         let docs = []
-        if (webScrap) {
+        if (relativeLinksMethod) {
+            if (process.env.DEBUG === 'true') console.info(`Start ${relativeLinksMethod}`)
             if (!limit) limit = '10'
-            availableUrls = await getAvailableURLs(url, parseInt(limit))
-            for (let i = 0; i < availableUrls.length; i++) {
-                docs.push(...(await cheerioLoader(availableUrls[i])))
+            else if (parseInt(limit) < 0) throw new Error('Limit cannot be less than 0')
+            const pages: string[] =
+                relativeLinksMethod === 'webCrawl' ? await webCrawl(url, parseInt(limit)) : await xmlScrape(url, parseInt(limit))
+            if (process.env.DEBUG === 'true') console.info(`pages: ${JSON.stringify(pages)}, length: ${pages.length}`)
+            if (!pages || pages.length === 0) throw new Error('No relative links found')
+            for (const page of pages) {
+                docs.push(...(await cheerioLoader(page)))
             }
+            if (process.env.DEBUG === 'true') console.info(`Finish ${relativeLinksMethod}`)
        } else {
             docs = await cheerioLoader(url)
         }
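The Playwright and Puppeteer loaders below receive the same rework, so the dispatch logic is worth spelling out once. A minimal standalone sketch of how the new inputs drive link collection (the `inputs` object is a hypothetical stand-in for `nodeData.inputs`; `webCrawl` and `xmlScrape` are the helpers added to `packages/components/src/utils.ts` later in this diff):

```typescript
import { webCrawl, xmlScrape } from '../../../src' // same path the node uses

// Hypothetical stand-in for nodeData.inputs
const inputs = { relativeLinksMethod: 'webCrawl', limit: '0', url: 'https://example.com' }

async function collectPages(): Promise<string[]> {
    let limit = inputs.limit
    if (!limit) limit = '10' // empty input keeps the old default of 10
    else if (parseInt(limit) < 0) throw new Error('Limit cannot be less than 0')
    // A limit of 0 means "no limit": collect until no more relative links are found
    return inputs.relativeLinksMethod === 'webCrawl'
        ? webCrawl(inputs.url, parseInt(limit))
        : xmlScrape(inputs.url, parseInt(limit))
}
```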
diff --git a/packages/components/nodes/documentloaders/Playwright/Playwright.ts b/packages/components/nodes/documentloaders/Playwright/Playwright.ts
index 6b7790af163..73a3e290868 100644
--- a/packages/components/nodes/documentloaders/Playwright/Playwright.ts
+++ b/packages/components/nodes/documentloaders/Playwright/Playwright.ts
@@ -2,7 +2,7 @@ import { INode, INodeData, INodeParams } from '../../../src/Interface'
 import { TextSplitter } from 'langchain/text_splitter'
 import { PlaywrightWebBaseLoader } from 'langchain/document_loaders/web/playwright'
 import { test } from 'linkifyjs'
-import { getAvailableURLs } from '../../../src'
+import { webCrawl, xmlScrape } from '../../../src'
 
 class Playwright_DocumentLoaders implements INode {
     label: string
@@ -35,19 +35,34 @@ class Playwright_DocumentLoaders implements INode {
                 optional: true
             },
             {
-                label: 'Web Scrap for Relative Links',
-                name: 'webScrap',
-                type: 'boolean',
+                label: 'Get Relative Links Method',
+                name: 'relativeLinksMethod',
+                type: 'options',
+                description: 'Select a method to retrieve relative links',
+                options: [
+                    {
+                        label: 'Web Crawl',
+                        name: 'webCrawl',
+                        description: 'Crawl relative links from HTML URL'
+                    },
+                    {
+                        label: 'Scrape XML Sitemap',
+                        name: 'scrapeXMLSitemap',
+                        description: 'Scrape relative links from XML sitemap URL'
+                    }
+                ],
                 optional: true,
                 additionalParams: true
             },
             {
-                label: 'Web Scrap Links Limit',
+                label: 'Get Relative Links Limit',
                 name: 'limit',
                 type: 'number',
-                default: 10,
                 optional: true,
-                additionalParams: true
+                additionalParams: true,
+                description:
+                    'Only used when "Get Relative Links Method" is selected. Set 0 to retrieve all relative links, default limit is 10.',
+                warning: `Retrieving all links might take a long time, and all links will be upserted again if the flow's state changes (e.g. different URL, chunk size, etc.)`
             },
             {
                 label: 'Metadata',
@@ -62,7 +77,7 @@ class Playwright_DocumentLoaders implements INode {
     async init(nodeData: INodeData): Promise<any> {
         const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
         const metadata = nodeData.inputs?.metadata
-        const webScrap = nodeData.inputs?.webScrap as boolean
+        const relativeLinksMethod = nodeData.inputs?.relativeLinksMethod as string
         let limit = nodeData.inputs?.limit as string
         let url = nodeData.inputs?.url as string
 
@@ -71,25 +86,34 @@
             throw new Error('Invalid URL')
         }
 
-        const playwrightLoader = async (url: string): Promise<any> => {
-            let docs = []
-            const loader = new PlaywrightWebBaseLoader(url)
-            if (textSplitter) {
-                docs = await loader.loadAndSplit(textSplitter)
-            } else {
-                docs = await loader.load()
+        async function playwrightLoader(url: string): Promise<any> {
+            let docs = []
+            try {
+                const loader = new PlaywrightWebBaseLoader(url)
+                if (textSplitter) {
+                    docs = await loader.loadAndSplit(textSplitter)
+                } else {
+                    docs = await loader.load()
+                }
+            } catch (err) {
+                if (process.env.DEBUG === 'true') console.error(`error in PlaywrightWebBaseLoader: ${err.message}, on page: ${url}`)
             }
             return docs
         }
 
-        let availableUrls: string[]
         let docs = []
-        if (webScrap) {
+        if (relativeLinksMethod) {
+            if (process.env.DEBUG === 'true') console.info(`Start ${relativeLinksMethod}`)
             if (!limit) limit = '10'
-            availableUrls = await getAvailableURLs(url, parseInt(limit))
-            for (let i = 0; i < availableUrls.length; i++) {
-                docs.push(...(await playwrightLoader(availableUrls[i])))
+            else if (parseInt(limit) < 0) throw new Error('Limit cannot be less than 0')
+            const pages: string[] =
+                relativeLinksMethod === 'webCrawl' ? await webCrawl(url, parseInt(limit)) : await xmlScrape(url, parseInt(limit))
+            if (process.env.DEBUG === 'true') console.info(`pages: ${JSON.stringify(pages)}, length: ${pages.length}`)
+            if (!pages || pages.length === 0) throw new Error('No relative links found')
+            for (const page of pages) {
+                docs.push(...(await playwrightLoader(page)))
             }
+            if (process.env.DEBUG === 'true') console.info(`Finish ${relativeLinksMethod}`)
         } else {
             docs = await playwrightLoader(url)
         }
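As in Cheerio, the Playwright loader keeps its splitting branch: with a text splitter connected, each page is chunked during load; without one, each page becomes a single document. A sketch of that branch outside the node wrapper (splitter settings are illustrative, not from this diff):

```typescript
import { PlaywrightWebBaseLoader } from 'langchain/document_loaders/web/playwright'
import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter'

async function loadPage(url: string, split: boolean) {
    const loader = new PlaywrightWebBaseLoader(url)
    if (!split) return loader.load() // one Document per page
    const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 })
    return loader.loadAndSplit(splitter) // many smaller, overlapping Documents per page
}
```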
diff --git a/packages/components/nodes/documentloaders/Puppeteer/Puppeteer.ts b/packages/components/nodes/documentloaders/Puppeteer/Puppeteer.ts
index bc1bc9ed628..014845d2d9a 100644
--- a/packages/components/nodes/documentloaders/Puppeteer/Puppeteer.ts
+++ b/packages/components/nodes/documentloaders/Puppeteer/Puppeteer.ts
@@ -2,7 +2,7 @@ import { INode, INodeData, INodeParams } from '../../../src/Interface'
 import { TextSplitter } from 'langchain/text_splitter'
 import { PuppeteerWebBaseLoader } from 'langchain/document_loaders/web/puppeteer'
 import { test } from 'linkifyjs'
-import { getAvailableURLs } from '../../../src'
+import { webCrawl, xmlScrape } from '../../../src'
 
 class Puppeteer_DocumentLoaders implements INode {
     label: string
@@ -35,19 +35,34 @@ class Puppeteer_DocumentLoaders implements INode {
                 optional: true
             },
             {
-                label: 'Web Scrape for Relative Links',
-                name: 'webScrape',
-                type: 'boolean',
+                label: 'Get Relative Links Method',
+                name: 'relativeLinksMethod',
+                type: 'options',
+                description: 'Select a method to retrieve relative links',
+                options: [
+                    {
+                        label: 'Web Crawl',
+                        name: 'webCrawl',
+                        description: 'Crawl relative links from HTML URL'
+                    },
+                    {
+                        label: 'Scrape XML Sitemap',
+                        name: 'scrapeXMLSitemap',
+                        description: 'Scrape relative links from XML sitemap URL'
+                    }
+                ],
                 optional: true,
                 additionalParams: true
             },
             {
-                label: 'Web Scrape Links Limit',
+                label: 'Get Relative Links Limit',
                 name: 'limit',
                 type: 'number',
-                default: 10,
                 optional: true,
-                additionalParams: true
+                additionalParams: true,
+                description:
+                    'Only used when "Get Relative Links Method" is selected. Set 0 to retrieve all relative links, default limit is 10.',
+                warning: `Retrieving all links might take a long time, and all links will be upserted again if the flow's state changes (e.g. different URL, chunk size, etc.)`
             },
             {
                 label: 'Metadata',
@@ -62,7 +77,7 @@ class Puppeteer_DocumentLoaders implements INode {
     async init(nodeData: INodeData): Promise<any> {
         const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
         const metadata = nodeData.inputs?.metadata
-        const webScrape = nodeData.inputs?.webScrape as boolean
+        const relativeLinksMethod = nodeData.inputs?.relativeLinksMethod as string
         let limit = nodeData.inputs?.limit as string
         let url = nodeData.inputs?.url as string
 
@@ -71,35 +86,39 @@
             throw new Error('Invalid URL')
         }
 
-        const puppeteerLoader = async (url: string): Promise<any> => {
-            let docs = []
-            const loader = new PuppeteerWebBaseLoader(url, {
-                launchOptions: {
-                    args: ['--no-sandbox'],
-                    headless: 'new'
+        async function puppeteerLoader(url: string): Promise<any> {
+            let docs = []
+            try {
+                const loader = new PuppeteerWebBaseLoader(url, {
+                    launchOptions: {
+                        args: ['--no-sandbox'],
+                        headless: 'new'
+                    }
+                })
+                if (textSplitter) {
+                    docs = await loader.loadAndSplit(textSplitter)
+                } else {
+                    docs = await loader.load()
                 }
-            })
-            if (textSplitter) {
-                docs = await loader.loadAndSplit(textSplitter)
-            } else {
-                docs = await loader.load()
+            } catch (err) {
+                if (process.env.DEBUG === 'true') console.error(`error in PuppeteerWebBaseLoader: ${err.message}, on page: ${url}`)
             }
             return docs
         }
 
-        let availableUrls: string[]
         let docs = []
-        if (webScrape) {
+        if (relativeLinksMethod) {
+            if (process.env.DEBUG === 'true') console.info(`Start ${relativeLinksMethod}`)
             if (!limit) limit = '10'
-            availableUrls = await getAvailableURLs(url, parseInt(limit))
-            for (let i = 0; i < availableUrls.length; i++) {
-                try {
-                    docs.push(...(await puppeteerLoader(availableUrls[i])))
-                } catch (error) {
-                    console.error('Error loading url with puppeteer. URL: ', availableUrls[i], 'Error: ', error)
-                    continue
-                }
+            else if (parseInt(limit) < 0) throw new Error('Limit cannot be less than 0')
+            const pages: string[] =
+                relativeLinksMethod === 'webCrawl' ? await webCrawl(url, parseInt(limit)) : await xmlScrape(url, parseInt(limit))
+            if (process.env.DEBUG === 'true') console.info(`pages: ${JSON.stringify(pages)}, length: ${pages.length}`)
+            if (!pages || pages.length === 0) throw new Error('No relative links found')
+            for (const page of pages) {
+                docs.push(...(await puppeteerLoader(page)))
             }
+            if (process.env.DEBUG === 'true') console.info(`Finish ${relativeLinksMethod}`)
         } else {
             docs = await puppeteerLoader(url)
         }
diff --git a/packages/components/package.json b/packages/components/package.json
index 775d6e946e2..684166613de 100644
--- a/packages/components/package.json
+++ b/packages/components/package.json
@@ -24,6 +24,7 @@
         "@qdrant/js-client-rest": "^1.2.2",
         "@supabase/supabase-js": "^2.21.0",
         "@types/js-yaml": "^4.0.5",
+        "@types/jsdom": "^21.1.1",
         "axios": "^0.27.2",
         "cheerio": "^1.0.0-rc.12",
         "chromadb": "^1.4.2",
diff --git a/packages/components/src/Interface.ts b/packages/components/src/Interface.ts
index f8a6fd58bcd..d9233e490de 100644
--- a/packages/components/src/Interface.ts
+++ b/packages/components/src/Interface.ts
@@ -57,6 +57,7 @@ export interface INodeParams {
     type: NodeParamsType | string
     default?: CommonType | ICommonObject | ICommonObject[]
     description?: string
+    warning?: string
     options?: Array<INodeOptionsValue>
     optional?: boolean | INodeDisplay
     rows?: number
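The new optional `warning` field on `INodeParams` is what the three loader nodes above set, and what the `NodeInputHandler.js` change at the end of this diff renders. A sketch of a param definition using it (hypothetical param; only `warning` is new):

```typescript
import { INodeParams } from './Interface'

const limitParam: INodeParams = {
    label: 'Get Relative Links Limit',
    name: 'limit',
    type: 'number',
    optional: true,
    additionalParams: true,
    description: 'Set 0 to retrieve all relative links, default limit is 10.',
    warning: 'Retrieving all links might take a long time.' // rendered as an alert box in the canvas UI
}
```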
diff --git a/packages/components/src/utils.ts b/packages/components/src/utils.ts
index c87b0831c76..e1399404c83 100644
--- a/packages/components/src/utils.ts
+++ b/packages/components/src/utils.ts
@@ -2,6 +2,7 @@ import axios from 'axios'
 import { load } from 'cheerio'
 import * as fs from 'fs'
 import * as path from 'path'
+import { JSDOM } from 'jsdom'
 import { BaseCallbackHandler } from 'langchain/callbacks'
 import { Server } from 'socket.io'
 import { ChainValues } from 'langchain/dist/schema'
@@ -202,6 +203,140 @@ export const getAvailableURLs = async (url: string, limit: number) => {
 }
 
 /**
+ * Search for href through htmlBody string
+ */
+function getURLsFromHTML(htmlBody: string, baseURL: string): string[] {
+    const dom = new JSDOM(htmlBody)
+    const linkElements = dom.window.document.querySelectorAll('a')
+    const urls: string[] = []
+    for (const linkElement of linkElements) {
+        if (linkElement.href.slice(0, 1) === '/') {
+            try {
+                const urlObj = new URL(baseURL + linkElement.href)
+                urls.push(urlObj.href) //relative
+            } catch (err) {
+                if (process.env.DEBUG === 'true') console.error(`error with relative url: ${err.message}`)
+                continue
+            }
+        } else {
+            try {
+                const urlObj = new URL(linkElement.href)
+                urls.push(urlObj.href) //absolute
+            } catch (err) {
+                if (process.env.DEBUG === 'true') console.error(`error with absolute url: ${err.message}`)
+                continue
+            }
+        }
+    }
+    return urls
+}
+
+/**
+ * Normalize URL to prevent crawling the same page
+ */
+function normalizeURL(urlString: string): string {
+    const urlObj = new URL(urlString)
+    const hostPath = urlObj.hostname + urlObj.pathname
+    if (hostPath.length > 0 && hostPath.slice(-1) === '/') {
+        // handling trailing slash
+        return hostPath.slice(0, -1)
+    }
+    return hostPath
+}
+
+/**
+ * Recursive crawl using normalizeURL and getURLsFromHTML
+ */
+async function crawl(baseURL: string, currentURL: string, pages: string[], limit: number): Promise<string[]> {
+    const baseURLObj = new URL(baseURL)
+    const currentURLObj = new URL(currentURL)
+
+    if (limit !== 0 && pages.length === limit) return pages
+
+    if (baseURLObj.hostname !== currentURLObj.hostname) return pages
+
+    const normalizeCurrentURL = baseURLObj.protocol + '//' + normalizeURL(currentURL)
+    if (pages.includes(normalizeCurrentURL)) {
+        return pages
+    }
+
+    pages.push(normalizeCurrentURL)
+
+    if (process.env.DEBUG === 'true') console.info(`actively crawling ${currentURL}`)
+    try {
+        const resp = await fetch(currentURL)
+
+        if (resp.status > 399) {
+            if (process.env.DEBUG === 'true') console.error(`error in fetch with status code: ${resp.status}, on page: ${currentURL}`)
+            return pages
+        }
+
+        const contentType: string | null = resp.headers.get('content-type')
+        if ((contentType && !contentType.includes('text/html')) || !contentType) {
+            if (process.env.DEBUG === 'true') console.error(`non html response, content type: ${contentType}, on page: ${currentURL}`)
+            return pages
+        }
+
+        const htmlBody = await resp.text()
+        const nextURLs = getURLsFromHTML(htmlBody, baseURL)
+        for (const nextURL of nextURLs) {
+            pages = await crawl(baseURL, nextURL, pages, limit)
+        }
+    } catch (err) {
+        if (process.env.DEBUG === 'true') console.error(`error in fetch url: ${err.message}, on page: ${currentURL}`)
+    }
+    return pages
+}
+
+/**
+ * Prep URL before passing into recursive crawl function
+ */
+export async function webCrawl(stringURL: string, limit: number): Promise<string[]> {
+    const URLObj = new URL(stringURL)
+    const modifyURL = stringURL.slice(-1) === '/' ? stringURL.slice(0, -1) : stringURL
+    return await crawl(URLObj.protocol + '//' + URLObj.hostname, modifyURL, [], limit)
+}
+
+export function getURLsFromXML(xmlBody: string, limit: number): string[] {
+    const dom = new JSDOM(xmlBody, { contentType: 'text/xml' })
+    const linkElements = dom.window.document.querySelectorAll('url')
+    const urls: string[] = []
+    for (const linkElement of linkElements) {
+        const locElement = linkElement.querySelector('loc')
+        if (limit !== 0 && urls.length === limit) break
+        if (locElement?.textContent) {
+            urls.push(locElement.textContent)
+        }
+    }
+    return urls
+}
+
+export async function xmlScrape(currentURL: string, limit: number): Promise<string[]> {
+    let urls: string[] = []
+    if (process.env.DEBUG === 'true') console.info(`actively scraping ${currentURL}`)
+    try {
+        const resp = await fetch(currentURL)
+
+        if (resp.status > 399) {
+            if (process.env.DEBUG === 'true') console.error(`error in fetch with status code: ${resp.status}, on page: ${currentURL}`)
+            return urls
+        }
+
+        const contentType: string | null = resp.headers.get('content-type')
+        if ((contentType && !contentType.includes('application/xml')) || !contentType) {
+            if (process.env.DEBUG === 'true') console.error(`non xml response, content type: ${contentType}, on page: ${currentURL}`)
+            return urls
+        }
+
+        const xmlBody = await resp.text()
+        urls = getURLsFromXML(xmlBody, limit)
+    } catch (err) {
+        if (process.env.DEBUG === 'true') console.error(`error in fetch url: ${err.message}, on page: ${currentURL}`)
+    }
+    return urls
+}
+
+/**
  * Get env variables
  * @param {string} url
  * @param {number} limit
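A usage sketch of the two new exports (URLs are illustrative; the package entry point is assumed to re-export them, as the node imports above suggest). `webCrawl` stays on the start page's hostname and dedupes via `normalizeURL`, so `/docs` and `/docs/` count as one page; `xmlScrape` collects the `<loc>` entries of a sitemap. Both swallow fetch errors and return whatever was collected so far, and `DEBUG=true` in the environment surfaces the progress logs:

```typescript
import { webCrawl, xmlScrape } from 'flowise-components' // package name assumed

async function main() {
    // Crawl up to 10 same-hostname pages, starting from the homepage
    const crawled = await webCrawl('https://example.com/', 10)

    // Collect every <loc> URL from a sitemap; a limit of 0 means "all"
    const fromSitemap = await xmlScrape('https://example.com/sitemap.xml', 0)

    console.log(crawled, fromSitemap)
}

main()
```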
diff --git a/packages/ui/src/views/canvas/NodeInputHandler.js b/packages/ui/src/views/canvas/NodeInputHandler.js
index 2d96bcb582c..ba72a4cef3e 100644
--- a/packages/ui/src/views/canvas/NodeInputHandler.js
+++ b/packages/ui/src/views/canvas/NodeInputHandler.js
@@ -7,7 +7,7 @@ import { useSelector } from 'react-redux'
 import { useTheme, styled } from '@mui/material/styles'
 import { Box, Typography, Tooltip, IconButton, Button } from '@mui/material'
 import { tooltipClasses } from '@mui/material/Tooltip'
-import { IconArrowsMaximize, IconEdit } from '@tabler/icons'
+import { IconArrowsMaximize, IconEdit, IconAlertTriangle } from '@tabler/icons'
 
 // project import
 import { Dropdown } from 'ui-component/dropdown/Dropdown'
@@ -210,6 +210,22 @@ const NodeInputHandler = ({ inputAnchor, inputParam, data, disabled = false, isA
                         )}
+                        {inputParam.warning && (
+                            <div
+                                style={{
+                                    display: 'flex',
+                                    flexDirection: 'row',
+                                    alignItems: 'center',
+                                    borderRadius: 10,
+                                    background: 'rgb(254,252,191)',
+                                    padding: 10,
+                                    marginTop: 10
+                                }}
+                            >
+                                <IconAlertTriangle size={36} color='orange' />
+                                <span style={{ color: 'rgb(116,66,16)', marginLeft: 10 }}>{inputParam.warning}</span>
+                            </div>
+                        )}
                         {inputParam.type === 'file' && (